diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index 5a4e810dd..e03395f6c 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -23,7 +23,7 @@ section if available. ### Docker - + ### Steps to reproduce: diff --git a/CHANGELOG.md b/CHANGELOG.md index aee24fb4b..3bb7b2daa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,7 +42,7 @@ - [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled. - [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue. -- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from spunkmetric serializer. +- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer. - [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets. - [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent. @@ -280,7 +280,7 @@ - [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input. - [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input. - [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output. -- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignement strategy configuration in kafka_consumer. +- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignment strategy configuration in kafka_consumer. - [#6731](https://github.com/influxdata/telegraf/pull/6731): Add node type tag to mongodb input. - [#6669](https://github.com/influxdata/telegraf/pull/6669): Add uptime_ns field to mongodb input. 
- [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input. @@ -344,7 +344,7 @@ - [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. - [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. -- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial numer match in smart input. +- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial number match in smart input. - [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. - [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances. - [#6471](https://github.com/influxdata/telegraf/issues/6471): Fix database routing on retry with exclude_database_tag. @@ -378,7 +378,7 @@ #### Release Notes - The cluster health related fields in the elasticsearch input have been split - out from the `elasticsearch_indices` mesasurement into the new + out from the `elasticsearch_indices` measurement into the new `elasticsearch_cluster_health_indices` measurement as they were originally combined by error. @@ -416,7 +416,7 @@ - [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin. - [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin. - [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input. -- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutal auth support to jti_openconfig_telemetry input. +- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutual auth support to jti_openconfig_telemetry input. 
- [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output. - [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin. - [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input. @@ -784,7 +784,7 @@ - [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input. - [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails. -- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparsable messages. +- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages. - [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods. - [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase. - [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes. @@ -1235,7 +1235,7 @@ ### Release Notes -- The `mysql` input plugin has been updated fix a number of type convertion +- The `mysql` input plugin has been updated to fix a number of type conversion issues. This may cause a `field type error` when inserting into InfluxDB due the change of types. @@ -1637,7 +1637,7 @@ - [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text. - [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric. - [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output. -- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris.
+- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris. - [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library. - [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout. - [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy. @@ -2084,7 +2084,7 @@ consistent with the behavior of `collection_jitter`. - [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine - [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns. - [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL -- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging. +- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin. - [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash - [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy! - [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm! @@ -2263,7 +2263,7 @@ It is not included on the report path. This is necessary for reporting host disk - [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin. - [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat. - [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin -- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman! +- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengelman! 
- [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers. - [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements. - [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja! @@ -2351,7 +2351,7 @@ because the `value` field is redundant in the graphite/librato context. - [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. -- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! +- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert! - [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! - [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout. - [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF! @@ -2362,7 +2362,7 @@ because the `value` field is redundant in the graphite/librato context. - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features -- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref! +- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bugfixes @@ -2850,7 +2850,7 @@ and filtering when specifying a config file. 
- [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! - [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! - [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! -- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham! +- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham! - [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! diff --git a/README.md b/README.md index ec203c1f2..d29ea7df7 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ telegraf config > telegraf.conf telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputing metrics to stdout: +#### Run a single telegraf collection, outputting metrics to stdout: ``` telegraf --config telegraf.conf --test diff --git a/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml index 9da79605f..f71b98206 100644 --- a/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -256,7 +256,7 @@ # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # host=localhost user=pqgotest password=... sslmode=... dbname=app_production # # All connection parameters are optional. By default, the host is localhost # and the user is the currently running user. 
For localhost, we default diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 975c42f14..ca0b3946d 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -178,7 +178,7 @@ Telegraf plugins are divided into 4 types: [inputs][], [outputs][], [processors][], and [aggregators][]. Unlike the `global_tags` and `agent` tables, any plugin can be defined -multiple times and each instance will run independantly. This allows you to +multiple times and each instance will run independently. This allows you to have plugins defined with differing configurations as needed within a single Telegraf process. diff --git a/docs/METRICS.md b/docs/METRICS.md index 1c238e30a..f903dcad4 100644 --- a/docs/METRICS.md +++ b/docs/METRICS.md @@ -12,7 +12,7 @@ four main components: - **Timestamp**: Date and time associated with the fields. This metric type exists only in memory and must be converted to a concrete -representation in order to be transmitted or viewed. To acheive this we +representation in order to be transmitted or viewed. To achieve this we provide several [output data formats][] sometimes referred to as *serializers*. Our default serializer converts to [InfluxDB Line Protocol][line protocol] which provides a high performance and one-to-one diff --git a/internal/templating/node.go b/internal/templating/node.go index 53d028fd0..bf68509a0 100644 --- a/internal/templating/node.go +++ b/internal/templating/node.go @@ -68,7 +68,7 @@ func (n *node) recursiveSearch(lineParts []string) *Template { // exclude last child from search if it is a wildcard. 
sort.Search expects // a lexicographically sorted set of children and we have artificially sorted // wildcards to the end of the child set - // wildcards will be searched seperately if no exact match is found + // wildcards will be searched separately if no exact match is found if hasWildcard = n.children[length-1].value == "*"; hasWildcard { length-- } @@ -79,7 +79,7 @@ func (n *node) recursiveSearch(lineParts []string) *Template { // given an exact match is found within children set if i < length && n.children[i].value == lineParts[0] { - // decend into the matching node + // descend into the matching node if tmpl := n.children[i].recursiveSearch(lineParts[1:]); tmpl != nil { // given a template is found return it return tmpl diff --git a/internal/tls/utils.go b/internal/tls/utils.go index 560d07ee2..ddc12d2c1 100644 --- a/internal/tls/utils.go +++ b/internal/tls/utils.go @@ -21,7 +21,7 @@ func ParseCiphers(ciphers []string) ([]uint16, error) { } // ParseTLSVersion returns a `uint16` by received version string key that represents tls version from crypto/tls. 
-// If version isn't supportes ParseTLSVersion returns 0 with error +// If version isn't supported ParseTLSVersion returns 0 with error func ParseTLSVersion(version string) (uint16, error) { if v, ok := tlsVersionMap[version]; ok { return v, nil diff --git a/internal/usage.go b/internal/usage.go index 124087343..b0df62a6f 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -48,7 +48,7 @@ Examples: # generate config with only cpu input & influxdb output plugins defined telegraf --input-filter cpu --output-filter influxdb config - # run a single telegraf collection, outputing metrics to stdout + # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test # run telegraf with all plugins defined in config file diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 3ee2f7eff..e205d6c1f 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -50,7 +50,7 @@ Examples: # generate config with only cpu input & influxdb output plugins defined telegraf --input-filter cpu --output-filter influxdb config - # run a single telegraf collection, outputing metrics to stdout + # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test # run telegraf with all plugins defined in config file diff --git a/metric.go b/metric.go index 1b7dfb6b2..6c7b1c6c5 100644 --- a/metric.go +++ b/metric.go @@ -57,7 +57,7 @@ type Metric interface { Time() time.Time // Type returns a general type for the entire metric that describes how you - // might interprete, aggregate the values. + // might interpret, aggregate the values. // // This method may be removed in the future and its use is discouraged. 
Type() ValueType diff --git a/models/running_processor_test.go b/models/running_processor_test.go index c24347b8e..4ac4743a7 100644 --- a/models/running_processor_test.go +++ b/models/running_processor_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -// MockProcessor is a Processor with an overrideable Apply implementation. +// MockProcessor is a Processor with an overridable Apply implementation. type MockProcessor struct { ApplyF func(in ...telegraf.Metric) []telegraf.Metric } diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index 53fca513d..8ef6d6fe2 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -1,6 +1,6 @@ # AMQP Consumer Input Plugin -This plugin provides a consumer for use with AMQP 0-9-1, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). Metrics are read from a topic exchange using the configured queue and binding_key. @@ -41,7 +41,7 @@ The following defaults are known to work with RabbitMQ: ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## AMQP queue name queue = "telegraf" diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index cee425f60..f3ee235e7 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -116,7 +116,7 @@ func (a *AMQPConsumer) SampleConfig() string { ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## AMQP queue name. 
queue = "telegraf" diff --git a/plugins/inputs/cisco_telemetry_gnmi/README.md b/plugins/inputs/cisco_telemetry_gnmi/README.md index 0b003fdef..d12817da1 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/README.md +++ b/plugins/inputs/cisco_telemetry_gnmi/README.md @@ -49,7 +49,7 @@ It has been optimized to support GNMI telemetry as produced by Cisco IOS XR (64- ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths ## ## origin usually refers to a (YANG) data model implemented by the device - ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr origin = "openconfig-interfaces" path = "/interfaces/interface/state/counters" diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go index 562c5effa..894b7feb0 100644 --- a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -515,7 +515,7 @@ const sampleConfig = ` ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths ## ## origin usually refers to a (YANG) data model implemented by the device - ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr origin = "openconfig-interfaces" path = "/interfaces/interface/state/counters" diff --git a/plugins/inputs/conntrack/README.md b/plugins/inputs/conntrack/README.md index 0eae4b3c3..813bc4861 100644 --- a/plugins/inputs/conntrack/README.md +++ b/plugins/inputs/conntrack/README.md @@ -34,7 +34,7 @@ For more information on conntrack-tools, see the "nf_conntrack_count","nf_conntrack_max"] ## Directories to search within for the conntrack files above. - ## Missing directrories will be ignored. + ## Missing directories will be ignored. dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ``` diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 4df01a31f..bf6c021c8 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -61,7 +61,7 @@ var sampleConfig = ` "nf_conntrack_count","nf_conntrack_max"] ## Directories to search within for the conntrack files above. - ## Missing directrories will be ignored. + ## Missing directories will be ignored. dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ` diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index 72bdeb231..8e1ecc094 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -44,7 +44,7 @@ report those stats already using StatsD protocol if needed. - consul_health_checks - tags: - - node (node that check/service is registred on) + - node (node that check/service is registered on) - service_name - check_id - fields: diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 13eaa02c8..6db7d3db9 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -12,7 +12,7 @@ ## http://admin:secret@couchbase-0.example.com:8091/ ## ## If no servers are specified, then localhost is used as the host. 
- ## If no protocol is specifed, HTTP is used. + ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. servers = ["http://localhost:8091"] ``` diff --git a/plugins/inputs/cpu/cpu_test.go b/plugins/inputs/cpu/cpu_test.go index 34d785350..bf356ec7b 100644 --- a/plugins/inputs/cpu/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -55,7 +55,7 @@ func TestCPUStats(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 because of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) @@ -102,7 +102,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) } -// Asserts that a given accumulator contains a measurment of type float64 with +// Asserts that a given accumulator contains a measurement of type float64 with // specific tags within a certain distance of a given expected value. Asserts a failure // if the measurement is of the wrong type, or if no matching measurements are found // @@ -113,7 +113,7 @@ func TestCPUStats(t *testing.T) { // expectedValue float64 : Value to search for within the measurement // delta float64 : Maximum acceptable distance of an accumulated value // from the expectedValue parameter. Useful when -// floating-point arithmatic imprecision makes looking +// floating-point arithmetic imprecision makes looking // for an exact match impractical // tags map[string]string : Tag set the found measurement must have. Set to nil to // ignore the tag set. 
@@ -225,7 +225,7 @@ func TestCPUTimesDecrease(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 because of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index 51152a367..dc8ddd903 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -16,7 +16,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi # domains = ["."] ## Query record type. - ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" ## Dns server port. diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index b33e508ea..c56572770 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -52,7 +52,7 @@ var sampleConfig = ` # domains = ["."] ## Query record type. - ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" ## Dns server port. 
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 6ec95b64f..95394c94e 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -184,7 +184,7 @@ The above measurements for the devicemapper storage driver can now be found in t - container_status - container_version + fields: - - total_pgmafault + - total_pgmajfault - cache - mapped_file - total_inactive_file diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go index cf5960b81..bf29ede43 100644 --- a/plugins/inputs/docker_log/docker_log.go +++ b/plugins/inputs/docker_log/docker_log.go @@ -73,7 +73,7 @@ const ( var ( containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} - // ensure *DockerLogs implements telegaf.ServiceInput + // ensure *DockerLogs implements telegraf.ServiceInput _ telegraf.ServiceInput = (*DockerLogs)(nil) ) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 57c107cc2..36fd15fe8 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -18,7 +18,7 @@ Specific Elasticsearch endpoints that are queried: - Indices Stats: /_all/_stats - Shard Stats: /_all/_stats?level=shards -Note that specific statistics information can change between Elassticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. +Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic json parser to make unique field names of whatever statistics names are provided at the mid-low level. 
### Configuration diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index a8544e1d1..8ed0b5111 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -58,7 +58,7 @@ systems. #### With a PowerShell on Windows, the output of the script appears to be truncated. -You may need to set a variable in your script to increase the numer of columns +You may need to set a variable in your script to increase the number of columns available for output: ``` $host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50) diff --git a/plugins/inputs/execd/shim/README.md b/plugins/inputs/execd/shim/README.md index f955ef15f..3bdb69f92 100644 --- a/plugins/inputs/execd/shim/README.md +++ b/plugins/inputs/execd/shim/README.md @@ -5,7 +5,7 @@ out to a stand-alone repo for the purpose of compiling it as a separate app and running it from the inputs.execd plugin. The execd-shim is still experimental and the interface may change in the future. -Especially as the concept expands to prcoessors, aggregators, and outputs. +Especially as the concept expands to processors, aggregators, and outputs. 
## Steps to externalize a plugin diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go index 96d8f0c3b..568ee07b5 100644 --- a/plugins/inputs/filecount/filecount_test.go +++ b/plugins/inputs/filecount/filecount_test.go @@ -203,7 +203,7 @@ func getFakeFileSystem(basePath string) fakeFileSystem { mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) olderMtime := time.Date(2010, time.December, 14, 18, 25, 5, 0, time.UTC) - // set file permisions + // set file permissions var fmask uint32 = 0666 var dmask uint32 = 0666 diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go index de028dcab..4e7d16e16 100644 --- a/plugins/inputs/filecount/filesystem_helpers_test.go +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -72,7 +72,7 @@ func getTestFileSystem() fakeFileSystem { mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) - // set file permisions + // set file permissions var fmask uint32 = 0666 var dmask uint32 = 0666 diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index c99960740..7d4a0cd5e 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -53,7 +53,7 @@ type pluginData struct { // parse JSON from fluentd Endpoint // Parameters: -// data: unprocessed json recivied from endpoint +// data: unprocessed json received from endpoint // // Returns: // pluginData: slice that contains parsed plugins @@ -76,7 +76,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) { // Description - display description func (h *Fluentd) Description() string { return description } -// SampleConfig - generate configuretion +// SampleConfig - generate configuration func (h *Fluentd) SampleConfig() string { return sampleConfig } // Gather - Main code responsible for gathering, processing and creating metrics diff --git a/plugins/inputs/github/README.md 
b/plugins/inputs/github/README.md index 65fda0301..46127082e 100644 --- a/plugins/inputs/github/README.md +++ b/plugins/inputs/github/README.md @@ -46,7 +46,7 @@ When the [internal][] input is enabled: + internal_github - tags: - - access_token - An obfusticated reference to the configured access token or "Unauthenticated" + - access_token - An obfuscated reference to the configured access token or "Unauthenticated" - fields: - limit - How many requests you are limited to (per hour) - remaining - How many requests you have remaining (per hour) diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 46712ea1e..acb191f8b 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -7,7 +7,7 @@ Plugin currently support two type of end points:- - multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple) - namespace (Ex http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}) -End Point can be a mixe of one multiple end point and several namespaces end points +End Point can be a mix of one multiple end point and several namespace end points Note: if namespace end point specified metrics array will be ignored for that call.
diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 1e0439a42..4309c6481 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -47,7 +47,7 @@ type HTTPClient interface { // req: HTTP request object // // Returns: - // http.Response: HTTP respons object + // http.Response: HTTP response object // error : Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 7ba141c23..a6e1d74b5 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -274,7 +274,7 @@ func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string] // Get error details netErr := setError(err, fields, tags) - // If recognize the returnded error, get out + // If we recognize the returned error, get out if netErr != nil { return fields, tags, nil } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 947fde5c8..ac483127d 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -722,7 +722,7 @@ func TestNetworkErrors(t *testing.T) { absentTags := []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) - // Connecton failed + // Connection failed h = &HTTPResponse{ Log: testutil.Logger{}, Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index e09eafc94..7feff1a84 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -42,7 +42,7 @@ type HTTPClient interface { // req: HTTP request object // // Returns: - // http.Response: HTTP respons object + // http.Response: HTTP response object // error : 
Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 6c93bd15e..2fd7cc707 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -44,7 +44,7 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] - ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid ## gaps or overlap in pulled data interval = "30s" diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go index b18dc5430..a909f5ea4 100644 --- a/plugins/inputs/jenkins/jenkins.go +++ b/plugins/inputs/jenkins/jenkins.go @@ -137,7 +137,7 @@ func (j *Jenkins) newHTTPClient() (*http.Client, error) { }, nil } -// seperate the client as dependency to use httptest Client for mocking +// separate the client as dependency to use httptest Client for mocking func (j *Jenkins) initialize(client *http.Client) error { var err error diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go index bf8ffb19d..b8284fc0d 100644 --- a/plugins/inputs/jenkins/jenkins_test.go +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -75,8 +75,8 @@ func TestResultCode(t *testing.T) { } type mockHandler struct { - // responseMap is the path to repsonse interface - // we will ouput the serialized response in json when serving http + // responseMap is the path to response interface + // we will output the serialized response in json when serving http // example '/computer/api/json': *gojenkins. 
responseMap map[string]interface{} } diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index 5005e8225..5b2aa00d8 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -43,7 +43,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error { return nil } -// gatherReponses adds points to an accumulator from the ReadResponse objects +// gatherResponses adds points to an accumulator from the ReadResponse objects // returned by a Jolokia agent. func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) { series := make(map[string][]point, 0) @@ -144,7 +144,7 @@ func metricMatchesResponse(metric Metric, response ReadResponse) bool { return false } -// compactPoints attepts to remove points by compacting points +// compactPoints attempts to remove points by compacting points // with matching tag sets. When a match is found, the fields from // one point are moved to another, and the empty point is removed. func compactPoints(points []point) []point { diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index a4cd76cc4..bc7c78045 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -980,7 +980,7 @@ type OpenConfigTelemetryClient interface { // The device should send telemetry data back on the same // connection as the subscription request. 
TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) // Get the list of current telemetry subscriptions from the // target. This command returns a list of existing subscriptions @@ -1076,7 +1076,7 @@ type OpenConfigTelemetryServer interface { // The device should send telemetry data back on the same // connection as the subscription request. TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) // Get the list of current telemetry subscriptions from the // target. This command returns a list of existing subscriptions diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index 38ce9b422..cf4aa145e 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -44,7 +44,7 @@ service OpenConfigTelemetry { // connection as the subscription request. 
rpc telemetrySubscribe(SubscriptionRequest) returns (stream OpenConfigData) {} - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription rpc cancelTelemetrySubscription(CancelSubscriptionRequest) returns (CancelSubscriptionReply) {} // Get the list of current telemetry subscriptions from the diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md index d6f3a707b..7896557ac 100644 --- a/plugins/inputs/kinesis_consumer/README.md +++ b/plugins/inputs/kinesis_consumer/README.md @@ -78,7 +78,7 @@ DynamoDB: #### DynamoDB Checkpoint The DynamoDB checkpoint stores the last processed record in a DynamoDB. To leverage -this functionality, create a table with the folowing string type keys: +this functionality, create a table with the following string type keys: ``` Partition key: namespace diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 2d38f23d9..a574bed06 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -116,7 +116,7 @@ Architecture][k8s-telegraf] or view the Helm charts: - rootfs_available_bytes - rootfs_capacity_bytes - rootfs_used_bytes - - logsfs_avaialble_bytes + - logsfs_available_bytes - logsfs_capacity_bytes - logsfs_used_bytes @@ -146,7 +146,7 @@ Architecture][k8s-telegraf] or view the Helm charts: ``` kubernetes_node -kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 
+kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 kubernetes_pod_volume,volume_name=default-token-f7wts,namespace=default,node_name=ip-172-17-0-1.internal,pod_name=storage-7 available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1546910783000000000 kubernetes_system_container diff --git a/plugins/inputs/kubernetes/kubernetes_metrics.go b/plugins/inputs/kubernetes/kubernetes_metrics.go index 96814bcbe..d45d4b5f1 100644 --- a/plugins/inputs/kubernetes/kubernetes_metrics.go +++ b/plugins/inputs/kubernetes/kubernetes_metrics.go @@ -2,7 +2,7 @@ package kubernetes import "time" -// SummaryMetrics represents all the summary data about a paritcular node retrieved from a kubelet +// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet type SummaryMetrics struct { Node NodeMetrics `json:"node"` Pods []PodMetrics `json:"pods"` diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 4af999b71..611ba294d 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -366,7 +366,7 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, a for _, file := range files { /* Turn /proc/fs/lustre/obdfilter//stats and similar * into just the object store 
target name - * Assumpion: the target name is always second to last, + * Assumption: the target name is always second to last, * which is true in Lustre 2.1->2.8 */ path := strings.Split(file, "/") diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 741dd73dc..4ce68e604 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -242,7 +242,7 @@ func metricsDiff(role Role, w []string) []string { return b } -// masterBlocks serves as kind of metrics registry groupping them in sets +// masterBlocks serves as kind of metrics registry grouping them in sets func getMetrics(role Role, group string) []string { var m map[string][]string diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 933d8bb05..e27fca9ba 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -78,7 +78,7 @@ minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,co minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 ``` -[server.properies]: https://minecraft.gamepedia.com/Server.properties +[server.properties]: https://minecraft.gamepedia.com/Server.properties [scoreboard]: http://minecraft.gamepedia.com/Scoreboard [objectives]: https://minecraft.gamepedia.com/Scoreboard#Objectives [rcon]: http://wiki.vg/RCON diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index a57d75629..345583a06 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -57,7 +57,7 @@ type Packet struct { Body string // Body of packet. 
} -// Compile converts a packets header and body into its approriate +// Compile converts a packets header and body into its appropriate // byte array payload, returning an error if the binary packages // Write method fails to write the header bytes in their little // endian byte order. @@ -112,7 +112,7 @@ func (c *Client) Execute(command string) (response *Packet, err error) { // Sends accepts the commands type and its string to execute to the clients server, // creating a packet with a random challenge id for the server to mirror, -// and compiling its payload bytes in the appropriate order. The resonse is +// and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned // if send fails. func (c *Client) Send(typ int32, command string) (response *Packet, err error) { @@ -152,7 +152,7 @@ func (c *Client) Send(typ int32, command string) (response *Packet, err error) { } if packet.Header.Type == Auth && header.Type == ResponseValue { - // Discard, empty SERVERDATA_RESPOSE_VALUE from authorization. + // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))) // Reread the packet header. diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index ba2e9148e..1bbc05847 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -215,7 +215,7 @@ by running Telegraf with the `--debug` argument. 
- repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`)) - repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`)) - repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`)) - - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deltes`)) + - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`)) - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) - updates_per_sec (integer, deprecated in 1.10; use `updates`)) @@ -247,7 +247,7 @@ by running Telegraf with the `--debug` argument. - total_index_size (integer) - ok (integer) - count (integer) - - type (tring) + - type (string) - mongodb_shard_stats - tags: diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index 820ea7bd3..5d64d7ab4 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -1,7 +1,7 @@ /*** The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go and contains modifications so that no other dependency from that project is needed. Other modifications included -removing uneccessary code specific to formatting the output and determine the current state of the database. It +removing unnecessary code specific to formatting the output and determine the current state of the database. It is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html ***/ @@ -317,7 +317,7 @@ type NetworkStats struct { NumRequests int64 `bson:"numRequests"` } -// OpcountStats stores information related to comamnds and basic CRUD operations. +// OpcountStats stores information related to commands and basic CRUD operations. 
type OpcountStats struct { Insert int64 `bson:"insert"` Query int64 `bson:"query"` @@ -691,7 +691,7 @@ type StatLine struct { CacheDirtyPercent float64 CacheUsedPercent float64 - // Cache ultilization extended (wiredtiger only) + // Cache utilization extended (wiredtiger only) TrackedDirtyBytes int64 CurrentCachedBytes int64 MaxBytesConfigured int64 diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md index 9abd657d5..be116394d 100644 --- a/plugins/inputs/monit/README.md +++ b/plugins/inputs/monit/README.md @@ -41,7 +41,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -62,7 +62,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -77,7 +77,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -93,7 +93,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -117,7 +117,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -136,7 +136,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -160,7 +160,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -175,7 +175,7 @@ Minimum Version of Monit tested with is 5.16. 
- address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -189,7 +189,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -203,7 +203,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode @@ -217,7 +217,7 @@ Minimum Version of Monit tested with is 5.16. - address - version - service - - paltform_name + - platform_name - status - monitoring_status - monitoring_mode diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 5f54f4bb4..9e0ba371f 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -194,7 +194,7 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { // AddRoute sets up the function for handling messages. These need to be // added in case we find a persistent session containing subscriptions so we - // know where to dispatch presisted and new messages to. In the alternate + // know where to dispatch persisted and new messages to. In the alternate // case that we need to create the subscriptions these will be replaced. for _, topic := range m.Topics { m.client.AddRoute(topic, m.recvMessage) @@ -218,7 +218,7 @@ func (m *MQTTConsumer) connect() error { m.state = Connected m.messages = make(map[telegraf.TrackingID]bool) - // Presistent sessions should skip subscription if a session is present, as + // Persistent sessions should skip subscription if a session is present, as // the subscriptions are stored by the server. 
type sessionPresent interface { SessionPresent() bool diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md index 558d4e442..2d71ac159 100644 --- a/plugins/inputs/multifile/README.md +++ b/plugins/inputs/multifile/README.md @@ -40,11 +40,11 @@ Path of the file to be parsed, relative to the `base_dir`. Name of the field/tag key, defaults to `$(basename file)`. * `conversion`: Data format used to parse the file contents: - * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. + * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - * `int`: Convertes the value into an integer. + * `int`: Converts the value into an integer. * `string`, `""`: No conversion. - * `bool`: Convertes the value into a boolean. + * `bool`: Converts the value into a boolean. * `tag`: File content is used as a tag. ### Example Output diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 3e07229da..8b4717168 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -45,7 +45,7 @@ This plugin gathers the statistic data from MySQL server ## <1.6: metric_version = 1 (or unset) metric_version = 2 - ## if the list is empty, then metrics are gathered from all databasee tables + ## if the list is empty, then metrics are gathered from all database tables # table_schema_databases = [] ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list @@ -153,7 +153,7 @@ If you wish to remove the `name_suffix` you may use Kapacitor to copy the historical data to the default name. 
Do this only after retiring the old measurement name. -1. Use the techinique described above to write to multiple locations: +1. Use the technique described above to write to multiple locations: ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -283,7 +283,7 @@ The unit of fields varies by the tags. * events_statements_rows_examined_total(float, number) * events_statements_tmp_tables_total(float, number) * events_statements_tmp_disk_tables_total(float, number) - * events_statements_sort_merge_passes_totales(float, number) + * events_statements_sort_merge_passes_totals(float, number) * events_statements_sort_rows_total(float, number) * events_statements_no_index_used_total(float, number) * Table schema - gathers statistics of each schema. It has following measurements diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index a2dc56505..81db026ec 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -71,7 +71,7 @@ const sampleConfig = ` ## <1.6: metric_version = 1 (or unset) metric_version = 2 - ## if the list is empty, then metrics are gathered from all databasee tables + ## if the list is empty, then metrics are gathered from all database tables # table_schema_databases = [] ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index 1d3b541e5..23af13a4c 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -151,7 +151,7 @@ func TestNSQStatsV1(t *testing.T) { } } -// v1 version of localhost/stats?format=json reesponse body +// v1 version of localhost/stats?format=json response body var responseV1 = ` { "version": "1.0.0-compat", diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index 4c1949869..5bbd4be89 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -12,7 +12,7 @@ This plugin gathers stats 
from [OpenSMTPD - a FREE implementation of the server- ## The default location of the smtpctl binary can be overridden with: binary = "/usr/sbin/smtpctl" - # The default timeout of 1s can be overriden with: + # The default timeout of 1s can be overridden with: #timeout = "1s" ``` diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 1c0e5690d..c3f76f2ef 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -37,7 +37,7 @@ var sampleConfig = ` ## The default location of the smtpctl binary can be overridden with: binary = "/usr/sbin/smtpctl" - ## The default timeout of 1000ms can be overriden with (in milliseconds): + ## The default timeout of 1000ms can be overridden with (in milliseconds): timeout = 1000 ` diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 2e70de5b7..96a5ed488 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -1,8 +1,8 @@ # PF Plugin -The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrive information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. +The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrieve information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. -The pf plugin retrives this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: +The pf plugin retrieves this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: * Run telegraf as root. This is strongly discouraged. 
* Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md index 987b6a382..53737a81a 100644 --- a/plugins/inputs/pgbouncer/README.md +++ b/plugins/inputs/pgbouncer/README.md @@ -15,7 +15,7 @@ More information about the meaning of these metrics can be found in the ## postgres://[pqgotest[:password]]@host:port[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. ## diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go index cbc38c869..0b8c8c16a 100644 --- a/plugins/inputs/pgbouncer/pgbouncer.go +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -24,7 +24,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. 
## diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 5a4d20019..9b42d91bd 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -59,7 +59,7 @@ func (client *conn) Request( rec := &record{} var err1 error - // recive until EOF or FCGI_END_REQUEST + // receive until EOF or FCGI_END_REQUEST READ_LOOP: for { err1 = rec.read(client.rwc) diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 452c7fa2b..0911b20ce 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -26,7 +26,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. ## diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 337b13d1b..5b121b66b 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -16,7 +16,7 @@ The example below has two queries are specified, with the following parameters: # specify address via a url matching: # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... # or a simple string: - # host=localhost port=5432 user=pqotest password=... sslmode=... dbname=app_production + # host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production # # All connection parameters are optional. 
# Without the dbname parameter, the driver will default to a database @@ -71,7 +71,7 @@ The example below has two queries are specified, with the following parameters: ``` The system can be easily extended using homemade metrics collection tools or -using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) +using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) # Sample Queries : - telegraf.conf postgresql_extensible queries (assuming that you have configured diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 9a3457228..f91feaf40 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -41,7 +41,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## All connection parameters are optional. 
# ## Without the dbname parameter, the driver will default to a database @@ -153,7 +153,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { columns []string ) - // Retreiving the database version + // Retrieving the database version query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { db_version = 0 diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 703febaa9..48bf76ed6 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/internal" ) -// Implemention of PIDGatherer that execs pgrep to find processes +// Implementation of PIDGatherer that execs pgrep to find processes type Pgrep struct { path string } diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index 1bdd553de..4a53ddc6c 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -53,7 +53,7 @@ For additional details reference the [RabbitMQ Management HTTP Stats][management # queue_name_include = [] # queue_name_exclude = [] - ## Federation upstreams to include and exlude specified as an array of glob + ## Federation upstreams to include and exclude specified as an array of glob ## pattern strings. Federation links can also be limited by the queue and ## exchange filters. 
# federation_upstream_include = [] diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 68652ca36..cb8fbb1aa 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -15,15 +15,15 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -// DefaultUsername will set a default value that corrasponds to the default +// DefaultUsername will set a default value that corresponds to the default // value used by Rabbitmq const DefaultUsername = "guest" -// DefaultPassword will set a default value that corrasponds to the default +// DefaultPassword will set a default value that corresponds to the default // value used by Rabbitmq const DefaultPassword = "guest" -// DefaultURL will set a default value that corrasponds to the default value +// DefaultURL will set a default value that corresponds to the default value // used by Rabbitmq const DefaultURL = "http://localhost:15672" diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 526f14a07..6883f3a90 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -21,7 +21,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ ### Measurements & Fields: -Salesforce provide one measurment named "salesforce". +Salesforce provide one measurement named "salesforce". Each entry is converted to snake\_case and 2 fields are created. 
- \_max represents the limit threshold diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index ad40ec566..b66266d3f 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -166,7 +166,7 @@ func (s *Salesforce) getLoginEndpoint() (string, error) { } } -// Authenticate with Salesfroce +// Authenticate with Salesforce func (s *Salesforce) login() error { if s.Username == "" || s.Password == "" { return errors.New("missing username or password") diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md index 9075bda72..19952fd82 100644 --- a/plugins/inputs/sensors/README.md +++ b/plugins/inputs/sensors/README.md @@ -18,7 +18,7 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se ``` ### Measurements & Fields: -Fields are created dynamicaly depending on the sensors. All fields are float. +Fields are created dynamically depending on the sensors. All fields are float. ### Tags: diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 2f8bf6d5b..57f29bfb0 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -962,9 +962,9 @@ func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, c // We could speed it up by putting a lock in snmpTranslateCache and then // returning it immediately, and multiple callers would then release the // snmpTranslateCachesLock and instead wait on the individual - // snmpTranlsation.Lock to release. But I don't know that the extra complexity + // snmpTranslation.Lock to release. But I don't know that the extra complexity // is worth it. Especially when it would slam the system pretty hard if lots - // of lookups are being perfomed. + // of lookups are being performed. 
stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) snmpTranslateCaches[oid] = stc diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go index 80fc28f7c..cb253a7d3 100644 --- a/plugins/inputs/snmp_trap/snmp_trap.go +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -254,7 +254,7 @@ func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { defer s.cacheLock.Unlock() var ok bool if e, ok = s.cache[oid]; !ok { - // cache miss. exec snmptranlate + // cache miss. exec snmptranslate e, err = s.snmptranslate(oid) if err == nil { s.cache[oid] = e diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go index 34dd6cde0..94781cf91 100644 --- a/plugins/inputs/snmp_trap/snmp_trap_test.go +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -84,7 +84,7 @@ func TestReceiveTrap(t *testing.T) { version gosnmp.SnmpVersion trap gosnmp.SnmpTrap // include pdus - // recieve + // receive entries []entry metrics []telegraf.Metric }{ diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index ec1aa0bef..840b92709 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -82,7 +82,7 @@ setting. Instructions on how to adjust these OS settings are available below. -Some OSes (most notably, Linux) place very restricive limits on the performance +Some OSes (most notably, Linux) place very restrictive limits on the performance of UDP protocols. It is _highly_ recommended that you increase these OS limits to at least 8MB before trying to run large amounts of UDP traffic to your instance. 8MB is just a recommendation, and can be adjusted higher. 
diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go index f8b4294b7..dc692a480 100644 --- a/plugins/inputs/stackdriver/stackdriver.go +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -544,7 +544,7 @@ func (s *Stackdriver) generatetimeSeriesConfs( for _, filter := range filters { // Add filter for list metric descriptors if // includeMetricTypePrefixes is specified, - // this is more effecient than iterating over + // this is more efficient than iterating over // all metric descriptors req.Filter = filter mdRespChan, err := s.client.ListMetricDescriptors(ctx, req) diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index f3daa117b..f76681134 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -35,7 +35,7 @@ func NewTestStatsd() *Statsd { return &s } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { listener := Statsd{ Log: testutil.Logger{}, @@ -66,7 +66,7 @@ func TestConcurrentConns(t *testing.T) { assert.Zero(t, acc.NFields()) } -// Test that MaxTCPConections is respected when max==1 +// Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := Statsd{ Log: testutil.Logger{}, @@ -95,7 +95,7 @@ func TestConcurrentConns1(t *testing.T) { assert.Zero(t, acc.NFields()) } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { listener := Statsd{ Log: testutil.Logger{}, diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index dca51bd97..32c5f2717 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -47,7 +47,7 @@ Syslog messages should be formatted according to ## Must be one of "octect-counting", "non-transparent". 
# framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" diff --git a/plugins/inputs/syslog/octetcounting_test.go b/plugins/inputs/syslog/octetcounting_test.go index ea86b808d..210b64dbe 100644 --- a/plugins/inputs/syslog/octetcounting_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -280,7 +280,7 @@ func getTestCasesForOctetCounting() []testCaseStream { werr: 1, }, // { - // name: "1st/of/ko", // overflow (msglen greather then max allowed octets) + // name: "1st/of/ko", // overflow (msglen greater than max allowed octets) // data: []byte(fmt.Sprintf("8193 <%d>%d %s %s %s %s %s 12 %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), // want: []testutil.Metric{}, // }, diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 92d134092..ecf190e47 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -87,7 +87,7 @@ var sampleConfig = ` ## Must be one of "octet-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" @@ -313,7 +313,7 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { opts = append(opts, syslog.WithBestEffort()) } - // Select the parser to use depeding on transport framing + // Select the parser to use depending on transport framing if s.Framing == framing.OctetCounting { // Octet counting transparent framing p = octetcounting.NewParser(opts...) 
diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 7c04ecaba..16895d674 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -141,7 +141,7 @@ func TestConnectTCP(t *testing.T) { } } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { listener := TcpListener{ Log: testutil.Logger{}, @@ -177,7 +177,7 @@ func TestConcurrentConns(t *testing.T) { assert.Equal(t, io.EOF, err) } -// Test that MaxTCPConections is respected when max==1 +// Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := TcpListener{ Log: testutil.Logger{}, @@ -211,7 +211,7 @@ func TestConcurrentConns1(t *testing.T) { assert.Equal(t, io.EOF, err) } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { listener := TcpListener{ Log: testutil.Logger{}, diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index d7d5c8ba9..1ccd183bc 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -21,7 +21,7 @@ a validating, recursive, and caching DNS resolver. ## The default location of the unbound config file can be overridden with: # config_file = "/etc/unbound/unbound.conf" - ## The default timeout of 1s can be overriden with: + ## The default timeout of 1s can be overridden with: # timeout = "1s" ## When set to true, thread metrics are tagged with the thread id. 
diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index c8247d0cf..bb4ecde58 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -49,7 +49,7 @@ var sampleConfig = ` ## The default location of the unbound config file can be overridden with: # config_file = "/etc/unbound/unbound.conf" - ## The default timeout of 1s can be overriden with: + ## The default timeout of 1s can be overridden with: # timeout = "1s" ## When set to true, thread metrics are tagged with the thread id. @@ -126,7 +126,7 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. func (s *Unbound) Gather(acc telegraf.Accumulator) error { - // Always exclude histrogram statistics + // Always exclude histogram statistics statExcluded := []string{"histogram.*"} filterExcluded, err := filter.Compile(statExcluded) if err != nil { diff --git a/plugins/inputs/uwsgi/README.md b/plugins/inputs/uwsgi/README.md index 8053676c0..c4d41a02d 100644 --- a/plugins/inputs/uwsgi/README.md +++ b/plugins/inputs/uwsgi/README.md @@ -13,7 +13,7 @@ The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](http ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] servers = ["tcp://127.0.0.1:1717"] - ## General connection timout + ## General connection timeout # timeout = "5s" ``` diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go index a20f3b2bf..b13a7b3e6 100644 --- a/plugins/inputs/uwsgi/uwsgi.go +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -42,7 +42,7 @@ func (u *Uwsgi) SampleConfig() string { ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] servers = ["tcp://127.0.0.1:1717"] - ## General connection timout + ## General connection timeout # timeout = "5s" ` } diff --git a/plugins/inputs/varnish/README.md 
b/plugins/inputs/varnish/README.md index 3609b12e7..2db149804 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -19,7 +19,7 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli + ## Usually appended after -n in varnish cli # instance_name = instanceName ## Timeout for varnishstat command @@ -92,7 +92,7 @@ MEMPOOL, etc). In the output, the prefix will be used as a tag, and removed from - MAIN.s_pipe (uint64, count, Total pipe sessions) - MAIN.s_pass (uint64, count, Total pass- ed requests) - MAIN.s_fetch (uint64, count, Total backend fetches) - - MAIN.s_synth (uint64, count, Total synthethic responses) + - MAIN.s_synth (uint64, count, Total synthetic responses) - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) - MAIN.s_req_bodybytes (uint64, count, Request body bytes) - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index 3a18deb6c..893f00c0a 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -49,7 +49,7 @@ var sampleConfig = ` stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli + ## Usually appended after -n in varnish cli # instance_name = instanceName ## Timeout for varnishstat command diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md index 6f2e35029..ef9e610fd 100644 --- a/plugins/inputs/vsphere/README.md +++ b/plugins/inputs/vsphere/README.md @@ -155,11 +155,11 @@ vm_metric_exclude = [ "*" ] ## separator character to use for measurement and field names (default: "_") # separator = "_" - ## number of objects to retreive per 
query for realtime resources (vms and hosts) + ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 - ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) + ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_metrics = 256 @@ -184,10 +184,10 @@ vm_metric_exclude = [ "*" ] ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. ## By default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. 
# custom_attribute_include = [] # custom_attribute_exclude = ["*"] @@ -208,7 +208,7 @@ A vCenter administrator can change this setting, see this [VMware KB article](ht Any modification should be reflected in this plugin by modifying the parameter `max_query_objects` ``` - ## number of objects to retreive per query for realtime resources (vms and hosts) + ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 ``` @@ -275,12 +275,12 @@ We can extend this to looking at a cluster level: ```/DC0/host/Cluster1/*/hadoop vCenter keeps two different kinds of metrics, known as realtime and historical metrics. -* Realtime metrics: Avaialable at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. +* Realtime metrics: Available at a 20 second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi hosts** and **virtual machine** resources. Realtime metrics are only stored for 1 hour in vCenter. * Historical metrics: Available at a 5 minute, 30 minutes, 2 hours and 24 hours rollup levels. The vSphere Telegraf plugin only uses the 5 minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**. 
For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html -This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collecition interval. This will cause error messages similar to this to appear in the Telegraf logs: +This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available on a 5 minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collection interval. This will cause error messages similar to this to appear in the Telegraf logs: ```2019-01-16T13:41:10Z W! 
[agent] input "inputs.vsphere" did not complete within its interval``` diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go index 176f48323..b3096f7be 100644 --- a/plugins/inputs/vsphere/client.go +++ b/plugins/inputs/vsphere/client.go @@ -36,7 +36,7 @@ type ClientFactory struct { parent *VSphere } -// Client represents a connection to vSphere and is backed by a govmoni connection +// Client represents a connection to vSphere and is backed by a govmomi connection type Client struct { Client *govmomi.Client Views *view.Manager diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go index a7d4db5ba..93d74e63f 100644 --- a/plugins/inputs/vsphere/endpoint.go +++ b/plugins/inputs/vsphere/endpoint.go @@ -535,7 +535,7 @@ func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, } n := len(sampledObjects) if n > maxMetadataSamples { - // Shuffle samples into the maxMetadatSamples positions + // Shuffle samples into the maxMetadataSamples positions for i := 0; i < maxMetadataSamples; i++ { j := int(rand.Int31n(int32(i + 1))) t := sampledObjects[i] @@ -1159,7 +1159,7 @@ func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resour } count++ - // Update highwater marks + // Update hiwater marks e.hwMarks.Put(moid, name, ts) } if nValues == 0 { diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go index 098c49334..e9a75510f 100644 --- a/plugins/inputs/vsphere/vsphere.go +++ b/plugins/inputs/vsphere/vsphere.go @@ -200,11 +200,11 @@ var sampleConfig = ` ## separator character to use for measurement and field names (default: "_") # separator = "_" - ## number of objects to retreive per query for realtime resources (vms and hosts) + ## number of objects to retrieve per query for realtime resources (vms and hosts) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_objects = 256 - ## number of metrics to retreive per query for non-realtime 
resources (clusters and datastores) + ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) ## set to 64 for vCenter 5.5 and 6.0 (default: 256) # max_query_metrics = 256 @@ -229,10 +229,10 @@ var sampleConfig = ` ## Custom attributes from vCenter can be very useful for queries in order to slice the ## metrics along different dimension and for forming ad-hoc relationships. They are disabled ## by default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. ## By default, since they can add a considerable amount of tags to the resulting metrics. To - ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include ## to select the attributes you want to include. 
# custom_attribute_include = [] # custom_attribute_exclude = ["*"] diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md index 5115d287c..4a4e64c73 100644 --- a/plugins/inputs/webhooks/github/README.md +++ b/plugins/inputs/webhooks/github/README.md @@ -78,7 +78,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.deployment.sha` string * 'task' = `event.deployment.task` string -* 'environment' = `event.deployment.evnironment` string +* 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string #### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent) @@ -96,7 +96,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.deployment.sha` string * 'task' = `event.deployment.task` string -* 'environment' = `event.deployment.evnironment` string +* 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string * 'depState' = `event.deployment_status.state` string * 'depDescription' = `event.deployment_status.description` string diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index 688898db0..4dc83b347 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -31,7 +31,7 @@ String data = String::format("{ \"tags\" : { ``` Escaping the "" is required in the source file. -The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like. +The number of tag values and field values is not restricted so you can send as many values per webhook call as you'd like. 
You will need to enable JSON messages in the Webhooks setup of Particle.io, and make sure to check the "include default data" box as well. diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 6a8dff10b..3a24761b9 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -214,7 +214,7 @@ func init() { // // To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility, // the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a -// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an +// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an // interface to the available counters, and can be found at the following key: // // HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index b60711e30..e22156bc6 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -268,7 +268,7 @@ A short description for some of the metrics. `arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2. Reasons could be: - - We have multiple pools, we evicted something from a pool whithout an l2 device. + - We have multiple pools, we evicted something from a pool without an l2 device. - The zfs property secondary cache. `arcstats_c` Arc target size, this is the size the system thinks the arc should have. 
diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index dba135cfd..87f21f672 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -155,7 +155,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = z.Gather(&acc) require.NoError(t, err) - //four pool, vdev_cache_stats and zfetchstatus metrics + //four pool, vdev_cache_stats and zfetchstats metrics intMetrics = getKstatMetricsVdevAndZfetch() acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index 60bf1b51a..dde89570b 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -5,7 +5,7 @@ vice versa. To convert from json to thrift, the json is unmarshalled, converted to zipkincore.Span structures, and marshalled into thrift binary protocol. The json must be in an array format (even if it only has one object), -because the tool automatically tries to unmarshall the json into an array of structs. +because the tool automatically tries to unmarshal the json into an array of structs. To convert from thrift to json, the opposite process must happen. The thrift binary data must be read into an array of diff --git a/plugins/outputs/amqp/README.md b/plugins/outputs/amqp/README.md index f810a0a7b..04715f8e3 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -1,6 +1,6 @@ # AMQP Output Plugin -This plugin writes to a AMQP 0-9-1 Exchange, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin writes to an AMQP 0-9-1 Exchange, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). This plugin does not bind the exchange to a queue. 
@@ -40,7 +40,7 @@ For an introduction to AMQP see: ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## Authentication credentials for the PLAIN auth_method. # username = "" diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index cb4cc4501..b00480d5a 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -99,7 +99,7 @@ var sampleConfig = ` ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## Authentication credentials for the PLAIN auth_method. # username = "" diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 7255ad068..5a017823c 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -288,7 +288,7 @@ func TestTagsAppliedToTelemetry(t *testing.T) { transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields)) transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry")) - // Will verify that all original tags are present in telemetry.Properies map + // Will verify that all original tags are present in telemetry.Properties map verifyAdditionalTelemetry(assert, m, transmitter, tt.metricValueFields, metricName) } diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go index 5995d4bca..d3697627e 100644 --- a/plugins/outputs/exec/exec.go +++ b/plugins/outputs/exec/exec.go @@ -44,12 +44,12 @@ func (e *Exec) SetSerializer(serializer serializers.Serializer) { e.serializer = serializer } -// Connect satisfies the Ouput interface. +// Connect satisfies the Output interface. 
func (e *Exec) Connect() error { return nil } -// Close satisfies the Ouput interface. +// Close satisfies the Output interface. func (e *Exec) Close() error { return nil } diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index 350633c56..45d0ac155 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -11,7 +11,7 @@ This plugin writes telegraf metrics to files ## Use batch serialization format instead of line based delimiting. The ## batch format allows for the production of non line based output formats and - ## may more effiently encode and write metrics. + ## may more efficiently encode and write metrics. # use_batch_format = false ## The file will be rotated after the time interval specified. When set diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 12d70d8f3..3798f107a 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -31,7 +31,7 @@ var sampleConfig = ` ## Use batch serialization format instead of line based delimiting. The ## batch format allows for the production of non line based output formats and - ## may more effiently encode metric groups. + ## may more efficiently encode metric groups. # use_batch_format = false ## The file will be rotated after the time interval specified. When set diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 2e3599788..92498f022 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -236,7 +236,7 @@ func (c *httpClient) CreateDatabase(ctx context.Context, database string) error } // Don't attempt to recreate the database after a 403 Forbidden error. - // This behavior exists only to maintain backwards compatiblity. + // This behavior exists only to maintain backwards compatibility. 
if resp.StatusCode == http.StatusForbidden { c.createDatabaseExecuted[database] = true } diff --git a/plugins/outputs/instrumental/README.md b/plugins/outputs/instrumental/README.md index 128599ee8..f8b48fd1e 100644 --- a/plugins/outputs/instrumental/README.md +++ b/plugins/outputs/instrumental/README.md @@ -20,6 +20,6 @@ by whitespace. The `increment` type is only used if the metric comes in as a cou template = "host.tags.measurement.field" ## Timeout in seconds to connect timeout = "2s" - ## Debug true - Print communcation to Instrumental + ## Debug true - Print communication to Instrumental debug = false ``` diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index a861ebc28..7284c0ca1 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -51,7 +51,7 @@ var sampleConfig = ` template = "host.tags.measurement.field" ## Timeout in seconds to connect timeout = "2s" - ## Display Communcation to Instrumental + ## Display Communication to Instrumental debug = false ` diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 12b6178fd..1931dacb9 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -51,7 +51,7 @@ solution to scale out. ### use_random_partitionkey [DEPRECATED] -When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random paritionKey there can be no guarantee of ordering when consuming the data off the shards. +When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random partitionKey there can be no guarantee of ordering when consuming the data off the shards. If true then the partitionkey option will be ignored. 
### partition @@ -70,7 +70,7 @@ All metrics will be mapped to the same shard which may limit throughput. #### tag -This will take the value of the specified tag from each metric as the paritionKey. +This will take the value of the specified tag from each metric as the partitionKey. If the tag is not found the `default` value will be used or `telegraf` if unspecified #### measurement diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index f6b205b1e..1aa840974 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -70,7 +70,7 @@ var sampleConfig = ` streamname = "StreamName" ## DEPRECATED: PartitionKey as used for sharding data. partitionkey = "PartitionKey" - ## DEPRECATED: If set the paritionKey will be a random UUID on every put. + ## DEPRECATED: If set the partitionKey will be a random UUID on every put. ## This allows for scaling across multiple shards in a stream. ## This will cause issues with ordering. use_random_partitionkey = false diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 0603394ec..53bb8c124 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -32,7 +32,7 @@ type Librato struct { var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]") var sampleConfig = ` - ## Librator API Docs + ## Librato API Docs ## http://dev.librato.com/v1/metrics-authentication ## Librato API user api_user = "telegraf@influxdb.com" # required. diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 38eec7c3b..aa028e056 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -53,7 +53,7 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ### Optional parameters: * `username`: The username to connect MQTT server. * `password`: The password to connect MQTT server. -* `client_id`: The unique client id to connect MQTT server. 
If this paramater is not set then a random ID is generated. +* `client_id`: The unique client id to connect MQTT server. If this parameter is not set then a random ID is generated. * `timeout`: Timeout for write operations. default: 5s * `tls_ca`: TLS CA * `tls_cert`: TLS CERT diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md index 142d1efa0..27ef3a09f 100644 --- a/plugins/outputs/stackdriver/README.md +++ b/plugins/outputs/stackdriver/README.md @@ -28,7 +28,7 @@ Additional resource labels can be configured by `resource_labels`. By default th ## Custom resource type # resource_type = "generic_node" - ## Additonal resource labels + ## Additional resource labels # [outputs.stackdriver.resource_labels] # node_id = "$HOSTNAME" # namespace = "myapp" diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go index fbb946fbd..3bd38614b 100644 --- a/plugins/outputs/stackdriver/stackdriver.go +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -61,7 +61,7 @@ var sampleConfig = ` ## Custom resource type # resource_type = "generic_node" - ## Additonal resource labels + ## Additional resource labels # [outputs.stackdriver.resource_labels] # node_id = "$HOSTNAME" # namespace = "myapp" diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md index 65f038f57..cb9bc8965 100644 --- a/plugins/outputs/syslog/README.md +++ b/plugins/outputs/syslog/README.md @@ -42,13 +42,13 @@ Syslog messages are formatted according to ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" ## SD-PARAMs settings ## Syslog messages can contain key/value pairs within zero or more - ## structured data sections. 
For each unrecognised metric tag/field a + ## structured data sections. For each unrecognized metric tag/field a ## SD-PARAMS is created. ## ## Example: @@ -64,8 +64,8 @@ Syslog messages are formatted according to # sdparam_separator = "_" ## Default sdid used for tags/fields that don't contain a prefix defined in - ## the explict sdids setting below If no default is specified, no SD-PARAMs - ## will be used for unrecognised field. + ## the explicit sdids setting below If no default is specified, no SD-PARAMs + ## will be used for unrecognized field. # default_sdid = "default@32473" ## List of explicit prefixes to extract from tag/field keys and use as the diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go index 582e8e920..41833f464 100644 --- a/plugins/outputs/syslog/syslog.go +++ b/plugins/outputs/syslog/syslog.go @@ -64,13 +64,13 @@ var sampleConfig = ` ## be one of "octet-counting", "non-transparent". # framing = "octet-counting" - ## The trailer to be expected in case of non-trasparent framing (default = "LF"). + ## The trailer to be expected in case of non-transparent framing (default = "LF"). ## Must be one of "LF", or "NUL". # trailer = "LF" ## SD-PARAMs settings ## Syslog messages can contain key/value pairs within zero or more - ## structured data sections. For each unrecognised metric tag/field a + ## structured data sections. For each unrecognized metric tag/field a ## SD-PARAMS is created. ## ## Example: @@ -86,8 +86,8 @@ var sampleConfig = ` # sdparam_separator = "_" ## Default sdid used for tags/fields that don't contain a prefix defined in - ## the explict sdids setting below If no default is specified, no SD-PARAMs - ## will be used for unrecognised field. + ## the explicit sdids setting below If no default is specified, no SD-PARAMs + ## will be used for unrecognized field. 
# default_sdid = "default@32473" ## List of explicit prefixes to extract from tag/field keys and use as the diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index 231e1057d..2daca328c 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -33,7 +33,7 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro #convert_paths = true ## Use Strict rules to sanitize metric and tag names from invalid characters - ## When enabled forward slash (/) and comma (,) will be accpeted + ## When enabled forward slash (/) and comma (,) will be accepted #use_strict = false ## Use Regex to sanitize metric and tag names from invalid characters @@ -75,7 +75,7 @@ source of the metric. ### Wavefront Data format The expected input for Wavefront is specified in the following way: ``` - [] = [tagk1=tagv1 ...tagkN=tagvN] + [] = [tagk1=tagv1 ...tagkN=tagvN] ``` More information about the Wavefront data format is available [here](https://community.wavefront.com/docs/DOC-1031) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index c455b6fa6..79c998e25 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -84,7 +84,7 @@ var sampleConfig = ` #convert_paths = true ## Use Strict rules to sanitize metric and tag names from invalid characters - ## When enabled forward slash (/) and comma (,) will be accpeted + ## When enabled forward slash (/) and comma (,) will be accepted #use_strict = false ## Use Regex to sanitize metric and tag names from invalid characters diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md index bd5024a1a..9ca34d288 100644 --- a/plugins/parsers/csv/README.md +++ b/plugins/parsers/csv/README.md @@ -40,7 +40,7 @@ values. ## These columns will be skipped in the header as well. 
csv_skip_columns = 0 - ## The seperator between csv fields + ## The separator between csv fields ## By default, the parser assumes a comma (",") csv_delimiter = "," diff --git a/plugins/parsers/dropwizard/README.md b/plugins/parsers/dropwizard/README.md index f0ff6d15c..436518a67 100644 --- a/plugins/parsers/dropwizard/README.md +++ b/plugins/parsers/dropwizard/README.md @@ -1,6 +1,6 @@ # Dropwizard -The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overriden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`. +The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`. [templates]: /docs/TEMPLATE_PATTERN.md [dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/ diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go index 7a5c759e7..915077c06 100644 --- a/plugins/parsers/graphite/config.go +++ b/plugins/parsers/graphite/config.go @@ -7,7 +7,7 @@ import ( const ( // DefaultSeparator is the default join character to use when joining multiple - // measurment parts in a template. + // measurement parts in a template. DefaultSeparator = "." 
) diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index 2f088d19d..ae08d5a7c 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -31,7 +31,7 @@ func (h *MetricHandler) SetTimePrecision(p time.Duration) { // comes from the server clock, truncated to the nearest unit of // measurement provided in precision. // - // When a timestamp is provided in the metric, precsision is + // When a timestamp is provided in the metric, precision is // overloaded to hold the unit of measurement of the timestamp. } diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md index b318f32e0..3bfa60044 100644 --- a/plugins/parsers/json/README.md +++ b/plugins/parsers/json/README.md @@ -46,7 +46,7 @@ ignored unless specified in the `tag_key` or `json_string_fields` options. ## metric. json_time_key = "" - ## Time format is the time layout that should be used to interprete the json_time_key. + ## Time format is the time layout that should be used to interpret the json_time_key. ## The time must be `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in the ## "reference time". To define a different format, arrange the values from ## the "reference time" in the example to match the format you will be diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go index 3b7c875a2..56a8d870b 100644 --- a/plugins/parsers/wavefront/element.go +++ b/plugins/parsers/wavefront/element.go @@ -37,7 +37,7 @@ type LiteralParser struct { func (ep *NameParser) parse(p *PointParser, pt *Point) error { //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). // Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes. 
- // Delta (U+2206) is allowed as the first characeter of the + // Delta (U+2206) is allowed as the first character of the // metricName name, err := parseLiteral(p) diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md index 215cd83e3..9a093fe0e 100644 --- a/plugins/processors/date/README.md +++ b/plugins/processors/date/README.md @@ -6,7 +6,7 @@ A common use is to add a tag that can be used to group by month or year. A few example usecases include: 1) consumption data for utilities on per month basis -2) bandwith capacity per month +2) bandwidth capacity per month 3) compare energy production or sales on a yearly or monthly basis ### Configuration diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md index f08a96c6b..348dae096 100644 --- a/plugins/processors/template/README.md +++ b/plugins/processors/template/README.md @@ -46,7 +46,7 @@ Add measurement name as a tag: ```diff - cpu,hostname=localhost time_idle=42 -+ cpu,hostname=localhost,meaurement=cpu time_idle=42 ++ cpu,hostname=localhost,measurement=cpu time_idle=42 ``` Add the year as a tag, similar to the date processor: diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index 15046991d..308d4f9f8 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -53,7 +53,7 @@ Note that depending on the amount of metrics on each computed bucket, more than # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_agregate_field' setting allows to + ## when aggregating metrics. The 'add_aggregate_field' setting allows to ## specify for which fields the final aggregation value is required. If the ## list is non empty, then a field will be added to each every metric for ## each field present in this setting. 
This field will contain diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index c2244c6e3..907ec1cc4 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -90,7 +90,7 @@ var sampleConfig = ` # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_agregate_field' setting allows to + ## when aggregating metrics. The 'add_aggregate_field' setting allows to ## specify for which fields the final aggregation value is required. If the ## list is non empty, then a field will be added to each every metric for ## each field present in this setting. This field will contain diff --git a/plugins/processors/topk/topk_test.go b/plugins/processors/topk/topk_test.go index ff0eb4d8b..928111b29 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -35,7 +35,7 @@ type metricChange struct { newTags []tag // Tags that should be added to the metric runHash bool // Sometimes the metrics' HashID must be run so reflect.DeepEqual works - // This happens because telegraf.Metric mantains an internal cache of + // This happens because telegraf.Metric maintains an internal cache of // its hash value that is set when HashID() is called for the first time } @@ -149,7 +149,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { aggregators := []string{"mean", "sum", "max", "min"} - //The answer is equal to the original set for these particual scenarios + //The answer is equal to the original set for these particular scenarios input := MetricsSet1 answer := MetricsSet1 diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md index acd497dbc..ba2170d9c 100644 --- a/plugins/serializers/splunkmetric/README.md +++ b/plugins/serializers/splunkmetric/README.md @@ -89,7 +89,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output: data_format = 
"splunkmetric" ## Provides time, index, source overrides for the HEC splunkmetric_hec_routing = true - # splunkmentric_multimetric = true + # splunkmetric_multimetric = true ## Additional HTTP headers [outputs.http.headers] @@ -102,7 +102,7 @@ to manage the HEC authorization, here's a sample config for an HTTP output: ## Overrides You can override the default values for the HEC token you are using by adding additional tags to the config file. -The following aspects of the token can be overriden with tags: +The following aspects of the token can be overridden with tags: * index * source diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go index 772771a10..801d0d69e 100644 --- a/plugins/serializers/splunkmetric/splunkmetric.go +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -83,7 +83,7 @@ func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries dataGroup.Source = commonTags.Source dataGroup.Fields = commonTags.Fields - // Stuff the metrid data into the structure. + // Stuff the metric data into the structure. for _, field := range metric.FieldList() { value, valid := verifyValue(field.Value) @@ -101,7 +101,7 @@ func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries // Output the data as a fields array and host,index,time,source overrides for the HEC. 
metricJSON, err = json.Marshal(dataGroup) default: - // Just output the data and the time, useful for file based outuputs + // Just output the data and the time, useful for file based outputs dataGroup.Fields["time"] = dataGroup.Time metricJSON, err = json.Marshal(dataGroup.Fields) } @@ -115,7 +115,7 @@ func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries } func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) { - /* The default mode is to generate one JSON entitiy per metric (required for pre-8.0 Splunks) + /* The default mode is to generate one JSON entity per metric (required for pre-8.0 Splunks) ** ** The format for single metric is 'nameOfMetric = valueOfMetric' */ @@ -149,7 +149,7 @@ func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSerie // Output the data as a fields array and host,index,time,source overrides for the HEC. metricJSON, err = json.Marshal(dataGroup) default: - // Just output the data and the time, useful for file based outuputs + // Just output the data and the time, useful for file based outputs dataGroup.Fields["time"] = dataGroup.Time metricJSON, err = json.Marshal(dataGroup.Fields) } diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md index 8ab77148d..3b72d95b4 100644 --- a/plugins/serializers/wavefront/README.md +++ b/plugins/serializers/wavefront/README.md @@ -9,7 +9,7 @@ The `wavefront` serializer translates the Telegraf metric format to the [Wavefro files = ["stdout"] ## Use Strict rules to sanitize metric and tag names from invalid characters - ## When enabled forward slash (/) and comma (,) will be accpeted + ## When enabled forward slash (/) and comma (,) will be accepted # wavefront_use_strict = false ## point tags to use as the source name for Wavefront (if none found, host will be used) diff --git a/scripts/build.py b/scripts/build.py index 
e3e791a1d..e30f44258 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -666,7 +666,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= else: if package_type == 'rpm' and release and '~' in package_version: package_version, suffix = package_version.split('~', 1) - # The ~ indicatees that this is a prerelease so we give it a leading 0. + # The ~ indicates that this is a prerelease so we give it a leading 0. package_iteration = "0.%s" % suffix fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( fpm_common_args, diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 5716d3518..6e5148ef7 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -717,7 +717,7 @@ func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) { } // NopAccumulator is used for benchmarking to isolate the plugin from the internal -// telegraf accumulator machinary. +// telegraf accumulator machinery. type NopAccumulator struct{} func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) {