Fix assorted spelling mistakes (#7507)

Authored by Josh Soref on 2020-05-15 18:43:32 -04:00; committed by GitHub
parent f74824eecb
commit bf1eb291f2
41 changed files with 79 additions and 79 deletions


@@ -130,7 +130,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
 if tlsConfig.MinVersion != 0 && tlsConfig.MaxVersion != 0 && tlsConfig.MinVersion > tlsConfig.MaxVersion {
 return nil, fmt.Errorf(
-"tls min version %q can't be greater then tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion)
+"tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion)
 }
 return tlsConfig, nil


@@ -225,7 +225,7 @@ func TestServerConfig(t *testing.T) {
 expErr: true,
 },
 {
-name: "TLS Max Version less then TLS Min version",
+name: "TLS Max Version less than TLS Min version",
 server: tls.ServerConfig{
 TLSCert: pki.ServerCertPath(),
 TLSKey: pki.ServerKeyPath(),


@@ -334,7 +334,7 @@ func TestValueType(t *testing.T) {
 assert.Equal(t, telegraf.Gauge, m.Type())
 }
-func TestCopyAggreate(t *testing.T) {
+func TestCopyAggregate(t *testing.T) {
 m1 := baseMetric()
 m1.SetAggregate(true)
 m2 := m1.Copy()


@@ -97,7 +97,7 @@ func TestFilter_Empty(t *testing.T) {
 "foo_bar",
 "foo.bar",
 "foo-bar",
-"supercalifradjulisticexpialidocious",
+"supercalifragilisticexpialidocious",
 }
 for _, measurement := range measurements {


@@ -295,7 +295,7 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) {
 if c.Metrics != nil {
 for _, m := range c.Metrics {
 metrics := []*cloudwatch.Metric{}
-if !hasWilcard(m.Dimensions) {
+if !hasWildcard(m.Dimensions) {
 dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
 for k, d := range m.Dimensions {
 dimensions[k] = &cloudwatch.Dimension{
@@ -603,7 +603,7 @@ func (f *metricCache) isValid() bool {
 return f.metrics != nil && time.Since(f.built) < f.ttl
 }
-func hasWilcard(dimensions []*Dimension) bool {
+func hasWildcard(dimensions []*Dimension) bool {
 for _, d := range dimensions {
 if d.Value == "" || d.Value == "*" {
 return true


@@ -292,7 +292,7 @@ func (e *EventHub) onDelivery(
 delete(groups, track.ID())
 if !ok {
 // The metrics should always be found, this message indicates a programming error.
-e.Log.Errorf("Could not find delievery: %d", track.ID())
+e.Log.Errorf("Could not find delivery: %d", track.ID())
 return true
 }


@@ -106,7 +106,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
 if resp.StatusCode == http.StatusForbidden {
 return fmt.Errorf("fireboard server responded with %d [Forbidden], verify your authToken", resp.StatusCode)
 }
-return fmt.Errorf("fireboard responded with unexepcted status code %d", resp.StatusCode)
+return fmt.Errorf("fireboard responded with unexpected status code %d", resp.StatusCode)
 }
 // Decode the response JSON into a new stats struct
 var stats []fireboardStats


@@ -24,7 +24,7 @@ type GitHub struct {
 HTTPTimeout internal.Duration `toml:"http_timeout"`
 githubClient *github.Client
-obfusticatedToken string
+obfuscatedToken string
 RateLimit selfstat.Stat
 RateLimitErrors selfstat.Stat
@@ -67,7 +67,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error)
 Timeout: g.HTTPTimeout.Duration,
 }
-g.obfusticatedToken = "Unauthenticated"
+g.obfuscatedToken = "Unauthenticated"
 if g.AccessToken != "" {
 tokenSource := oauth2.StaticTokenSource(
@@ -76,7 +76,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error)
 oauthClient := oauth2.NewClient(ctx, tokenSource)
 ctx = context.WithValue(ctx, oauth2.HTTPClient, oauthClient)
-g.obfusticatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]
+g.obfuscatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]
 return g.newGithubClient(oauthClient)
 }
@@ -105,7 +105,7 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
 g.githubClient = githubClient
 tokenTags := map[string]string{
-"access_token": g.obfusticatedToken,
+"access_token": g.obfuscatedToken,
 }
 g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags)


@@ -221,7 +221,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
 })
 if err != nil {
 k.cancel()
-k.Log.Errorf("Scan encounterred an error: %s", err.Error())
+k.Log.Errorf("Scan encountered an error: %s", err.Error())
 k.cons = nil
 }
 }()


@@ -178,7 +178,7 @@ type Report struct {
 Unsubscribed int `json:"unsubscribed"`
 SendTime string `json:"send_time"`
-TimeSeries []TimeSerie
+TimeSeries []TimeSeries
 Bounces Bounces `json:"bounces"`
 Forwards Forwards `json:"forwards"`
 Opens Opens `json:"opens"`
@@ -237,7 +237,7 @@ type ListStats struct {
 ClickRate float64 `json:"click_rate"`
 }
-type TimeSerie struct {
+type TimeSeries struct {
 TimeStamp string `json:"timestamp"`
 EmailsSent int `json:"emails_sent"`
 UniqueOpens int `json:"unique_opens"`


@@ -140,7 +140,7 @@ func TestMailChimpGatherReport(t *testing.T) {
 }
-func TestMailChimpGatherErroror(t *testing.T) {
+func TestMailChimpGatherError(t *testing.T) {
 ts := httptest.NewServer(
 http.HandlerFunc(
 func(w http.ResponseWriter, r *http.Request) {


@@ -84,7 +84,7 @@ type MlHost struct {
 // Description of plugin returned
 func (c *Marklogic) Description() string {
-return "Retrives information on a specific host in a MarkLogic Cluster"
+return "Retrieves information on a specific host in a MarkLogic Cluster"
 }
 var sampleConfig = `


@@ -32,8 +32,8 @@ const (
 // Rcon package errors.
 var (
-ErrInvalidWrite = errors.New("Failed to write the payload corretly to remote connection.")
-ErrInvalidRead = errors.New("Failed to read the response corretly from remote connection.")
+ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.")
+ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.")
 ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.")
 ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.")
 ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.")


@@ -590,7 +590,7 @@ func TestConnection(t *testing.T) {
 }
 }
-func TestInvalidUsernameorPassword(t *testing.T) {
+func TestInvalidUsernameOrPassword(t *testing.T) {
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -624,7 +624,7 @@ func TestInvalidUsernameorPassword(t *testing.T) {
 assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200")
 }
-func TestNoUsernameorPasswordConfiguration(t *testing.T) {
+func TestNoUsernameOrPasswordConfiguration(t *testing.T) {
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {


@@ -341,7 +341,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
 }
 if len(m.Servers) == 0 {
-return opts, fmt.Errorf("could not get host infomations")
+return opts, fmt.Errorf("could not get host informations")
 }
 for _, server := range m.Servers {


@@ -715,7 +715,7 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) {
 })
 }
-func TestHatherHttpUpstreamsMetrics(t *testing.T) {
+func TestGatherHttpUpstreamsMetrics(t *testing.T) {
 ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload)
 defer ts.Close()


@@ -81,7 +81,7 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) {
 acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
 }
-func TestParseSimpleOutputInavlidPeer(t *testing.T) {
+func TestParseSimpleOutputInvalidPeer(t *testing.T) {
 acc := &testutil.Accumulator{}
 v := &Openntpd{
 run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false),


@@ -79,7 +79,7 @@ func (pa *PuppetAgent) SampleConfig() string {
 // Description returns description of PuppetAgent plugin
 func (pa *PuppetAgent) Description() string {
-return `Reads last_run_summary.yaml file and converts to measurments`
+return `Reads last_run_summary.yaml file and converts to measurements`
 }
 // Gather reads stats from all configured servers accumulates stats


@@ -127,7 +127,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
 // Successful responses will always return status code 200
 if resp.StatusCode != http.StatusOK {
-return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode)
+return fmt.Errorf("riak responded with unexpected status code %d", resp.StatusCode)
 }
 // Decode the response JSON into a new stats struct


@@ -105,7 +105,7 @@ var (
 "Available Spare": {
 Name: "Available_Spare",
 Parse: func(fields, deviceFields map[string]interface{}, str string) error {
-return parseCommaSeperatedInt(fields, deviceFields, strings.TrimSuffix(str, "%"))
+return parseCommaSeparatedInt(fields, deviceFields, strings.TrimSuffix(str, "%"))
 },
 },
 }
@@ -360,7 +360,7 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co
 tags["id"] = attr.ID
 }
-parse := parseCommaSeperatedInt
+parse := parseCommaSeparatedInt
 if attr.Parse != nil {
 parse = attr.Parse
 }
@@ -421,7 +421,7 @@ func parseInt(str string) int64 {
 return 0
 }
-func parseCommaSeperatedInt(fields, _ map[string]interface{}, str string) error {
+func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error {
 i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64)
 if err != nil {
 return err


@@ -714,7 +714,7 @@ Transport protocol: SAS (SPL-3)
 Local Time is: Wed Apr 17 15:01:28 2019 PDT
 SMART support is: Available - device has SMART capability.
 SMART support is: Enabled
-Temp$rature Warning: Disabled or Not Supported
+Temperature Warning: Disabled or Not Supported
 === START OF READ SMART DATA SECTION ===
 SMART Health Status: OK


@@ -226,7 +226,7 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo
 func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 var coreMetrics map[string]Core
 if len(mBeansData.SolrMbeans) < 2 {
-return fmt.Errorf("no core metric data to unmarshall")
+return fmt.Errorf("no core metric data to unmarshal")
 }
 if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil {
 return err
@@ -257,7 +257,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
 var queryMetrics map[string]QueryHandler
 if len(mBeansData.SolrMbeans) < 4 {
-return fmt.Errorf("no query handler metric data to unmarshall")
+return fmt.Errorf("no query handler metric data to unmarshal")
 }
 if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil {
@@ -332,7 +332,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD
 var updateMetrics map[string]UpdateHandler
 if len(mBeansData.SolrMbeans) < 6 {
-return fmt.Errorf("no update handler metric data to unmarshall")
+return fmt.Errorf("no update handler metric data to unmarshal")
 }
 if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil {
 return err
@@ -410,7 +410,7 @@ func getInt(unk interface{}) int64 {
 // Add cache metrics section to accumulator
 func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 if len(mBeansData.SolrMbeans) < 8 {
-return fmt.Errorf("no cache metric data to unmarshall")
+return fmt.Errorf("no cache metric data to unmarshal")
 }
 var cacheMetrics map[string]Cache
 if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil {


@@ -218,7 +218,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
 mdDesc, mdErr := mdResp.Next()
 if mdErr != nil {
 if mdErr != iterator.Done {
-c.log.Errorf("Failed iterating metric desciptor responses: %q: %v", req.String(), mdErr)
+c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
 }
 break
 }


@@ -101,7 +101,7 @@ func (n *Tengine) createHttpClient() (*http.Client, error) {
 return client, nil
 }
-type TengineSatus struct {
+type TengineStatus struct {
 host string
 bytes_in uint64
 bytes_out uint64
@@ -135,7 +135,7 @@ type TengineSatus struct {
 }
 func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
-var tenginestatus TengineSatus
+var tenginestatus TengineStatus
 resp, err := n.client.Get(addr.String())
 if err != nil {
 return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)


@@ -192,7 +192,7 @@ MAIN.s_req 0 0.00 Total requests seen
 MAIN.s_pipe 0 0.00 Total pipe sessions seen
 MAIN.s_pass 0 0.00 Total pass-ed requests seen
 MAIN.s_fetch 0 0.00 Total backend fetches initiated
-MAIN.s_synth 0 0.00 Total synthethic responses made
+MAIN.s_synth 0 0.00 Total synthetic responses made
 MAIN.s_req_hdrbytes 0 0.00 Request header bytes
 MAIN.s_req_bodybytes 0 0.00 Request body bytes
 MAIN.s_resp_hdrbytes 0 0.00 Response header bytes


@@ -135,7 +135,7 @@ func defaultVSphere() *VSphere {
 VMInclude: []string{"/**"},
 DatastoreMetricInclude: []string{
 "disk.used.*",
-"disk.provsioned.*"},
+"disk.provisioned.*"},
 DatastoreMetricExclude: nil,
 DatastoreInclude: []string{"/**"},
 DatacenterMetricInclude: nil,


@@ -74,7 +74,7 @@ func (m *PerformanceQueryImpl) Open() error {
 // Close closes the counterPath, releases associated counter handles and frees resources
 func (m *PerformanceQueryImpl) Close() error {
 if m.query == 0 {
-return errors.New("uninitialised query")
+return errors.New("uninitialized query")
 }
 if ret := PdhCloseQuery(m.query); ret != ERROR_SUCCESS {
@@ -87,7 +87,7 @@ func (m *PerformanceQueryImpl) Close() error {
 func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 var counterHandle PDH_HCOUNTER
 if m.query == 0 {
-return 0, errors.New("uninitialised query")
+return 0, errors.New("uninitialized query")
 }
 if ret := PdhAddCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS {
@@ -99,7 +99,7 @@ func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNT
 func (m *PerformanceQueryImpl) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 var counterHandle PDH_HCOUNTER
 if m.query == 0 {
-return 0, errors.New("uninitialised query")
+return 0, errors.New("uninitialized query")
 }
 if ret := PdhAddEnglishCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS {
 return 0, NewPdhError(ret)
@@ -184,7 +184,7 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN
 func (m *PerformanceQueryImpl) CollectData() error {
 var ret uint32
 if m.query == 0 {
-return errors.New("uninitialised query")
+return errors.New("uninitialized query")
 }
 if ret = PdhCollectQueryData(m.query); ret != ERROR_SUCCESS {
@@ -195,7 +195,7 @@ func (m *PerformanceQueryImpl) CollectData() error {
 func (m *PerformanceQueryImpl) CollectDataWithTime() (time.Time, error) {
 if m.query == 0 {
-return time.Now(), errors.New("uninitialised query")
+return time.Now(), errors.New("uninitialized query")
 }
 ret, mtime := PdhCollectQueryDataWithTime(m.query)
 if ret != ERROR_SUCCESS {


@@ -27,15 +27,15 @@ func TestWinPerformanceQueryImpl(t *testing.T) {
 _, err = query.AddCounterToQuery("")
 require.Error(t, err, "uninitialized query must return errors")
-assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+assert.True(t, strings.Contains(err.Error(), "uninitialized"))
 _, err = query.AddEnglishCounterToQuery("")
 require.Error(t, err, "uninitialized query must return errors")
-assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+assert.True(t, strings.Contains(err.Error(), "uninitialized"))
 err = query.CollectData()
 require.Error(t, err, "uninitialized query must return errors")
-assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+assert.True(t, strings.Contains(err.Error(), "uninitialized"))
 err = query.Open()
 require.NoError(t, err)


@@ -50,7 +50,7 @@ func (m *FakePerformanceQuery) Open() error {
 func (m *FakePerformanceQuery) Close() error {
 if !m.openCalled {
-return errors.New("CloSe: uninitialised query")
+return errors.New("CloSe: uninitialized query")
 }
 m.openCalled = false
 return nil
@@ -58,7 +58,7 @@ func (m *FakePerformanceQuery) Close() error {
 func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 if !m.openCalled {
-return 0, errors.New("AddCounterToQuery: uninitialised query")
+return 0, errors.New("AddCounterToQuery: uninitialized query")
 }
 if c, ok := m.counters[counterPath]; ok {
 return c.handle, nil
@@ -69,7 +69,7 @@ func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNT
 func (m *FakePerformanceQuery) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 if !m.openCalled {
-return 0, errors.New("AddEnglishCounterToQuery: uninitialised query")
+return 0, errors.New("AddEnglishCounterToQuery: uninitialized query")
 }
 if c, ok := m.counters[counterPath]; ok {
 return c.handle, nil
@@ -97,7 +97,7 @@ func (m *FakePerformanceQuery) ExpandWildCardPath(counterPath string) ([]string,
 func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_HCOUNTER) (float64, error) {
 if !m.openCalled {
-return 0, errors.New("GetFormattedCounterValueDouble: uninitialised query")
+return 0, errors.New("GetFormattedCounterValueDouble: uninitialized query")
 }
 for _, counter := range m.counters {
 if counter.handle == counterHandle {
@@ -129,7 +129,7 @@ func (m *FakePerformanceQuery) findCounterByHandle(counterHandle PDH_HCOUNTER) *
 func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) {
 if !m.openCalled {
-return nil, errors.New("GetFormattedCounterArrayDouble: uninitialised query")
+return nil, errors.New("GetFormattedCounterArrayDouble: uninitialized query")
 }
 for _, c := range m.counters {
 if c.handle == hCounter {
@@ -157,14 +157,14 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN
 func (m *FakePerformanceQuery) CollectData() error {
 if !m.openCalled {
-return errors.New("CollectData: uninitialised query")
+return errors.New("CollectData: uninitialized query")
 }
 return nil
 }
 func (m *FakePerformanceQuery) CollectDataWithTime() (time.Time, error) {
 if !m.openCalled {
-return time.Now(), errors.New("CollectData: uninitialised query")
+return time.Now(), errors.New("CollectData: uninitialized query")
 }
 return MetricTime, nil
 }


@@ -127,7 +127,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica
 }
 return certs, nil
 default:
-return nil, fmt.Errorf("unsuported scheme '%s' in location %s", u.Scheme, u.String())
+return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String())
 }
 }


@@ -55,7 +55,7 @@ func main() {
 zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second))
 defer collector.Close()
 if err != nil {
-log.Fatalf("Error intializing zipkin http collector: %v\n", err)
+log.Fatalf("Error initializing zipkin http collector: %v\n", err)
 }
 tracer, err := zipkin.NewTracer(


@@ -117,7 +117,7 @@ func (k *KinesisOutput) Description() string {
 func (k *KinesisOutput) Connect() error {
 if k.Partition == nil {
-log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition")
+log.Print("E! kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition")
 }
 // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using


@@ -223,7 +223,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
 }
 if len(m.Servers) == 0 {
-return opts, fmt.Errorf("could not get host infomations")
+return opts, fmt.Errorf("could not get host informations")
 }
 for _, host := range m.Servers {
 server := fmt.Sprintf("%s://%s", scheme, host)


@@ -16,14 +16,14 @@ import (
 var (
 allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
-hypenChars = strings.NewReplacer(
+hyphenChars = strings.NewReplacer(
 "@", "-",
 "*", "-",
 `%`, "-",
 "#", "-",
 "$", "-")
 defaultHttpPath = "/api/put"
-defaultSeperator = "_"
+defaultSeparator = "_"
 )
 type OpenTSDB struct {
@@ -261,8 +261,8 @@ func (o *OpenTSDB) Close() error {
 }
 func sanitize(value string) string {
-// Apply special hypenation rules to preserve backwards compatibility
-value = hypenChars.Replace(value)
+// Apply special hyphenation rules to preserve backwards compatibility
+value = hyphenChars.Replace(value)
 // Replace any remaining illegal chars
 return allowedChars.ReplaceAllLiteralString(value, "_")
 }
@@ -271,7 +271,7 @@ func init() {
 outputs.Add("opentsdb", func() telegraf.Output {
 return &OpenTSDB{
 HttpPath: defaultHttpPath,
-Separator: defaultSeperator,
+Separator: defaultSeparator,
 }
 })
 }


@@ -243,7 +243,7 @@ func TestTrimSpace(t *testing.T) {
 require.Equal(t, expectedFields, metrics[0].Fields())
 }
-func TestTrimSpaceDelimetedBySpace(t *testing.T) {
+func TestTrimSpaceDelimitedBySpace(t *testing.T) {
 p := Parser{
 Delimiter: " ",
 HeaderRowCount: 1,


@@ -28,7 +28,7 @@ type WhiteSpaceParser struct {
 type TagParser struct{}
 type LoopedParser struct {
 wrappedParser ElementParser
-wsPaser *WhiteSpaceParser
+wsParser *WhiteSpaceParser
 }
 type LiteralParser struct {
 literal string
@@ -136,7 +136,7 @@ func (ep *LoopedParser) parse(p *PointParser, pt *Point) error {
 if err != nil {
 return err
 }
-err = ep.wsPaser.parse(p, pt)
+err = ep.wsParser.parse(p, pt)
 if err == ErrEOF {
 break
 }


@@ -47,7 +47,7 @@ func NewWavefrontElements() []ElementParser {
 var elements []ElementParser
 wsParser := WhiteSpaceParser{}
 wsParserNextOpt := WhiteSpaceParser{nextOptional: true}
-repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsPaser: &wsParser}
+repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsParser: &wsParser}
 elements = append(elements, &NameParser{}, &wsParser, &ValueParser{}, &wsParserNextOpt,
 &TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser)
 return elements


@@ -17,7 +17,7 @@ const DEFAULT_TEMPLATE = "host.tags.measurement.field"
 var (
 allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`)
-hypenChars = strings.NewReplacer(
+hyphenChars = strings.NewReplacer(
 "/", "-",
 "@", "-",
 "*", "-",
@@ -308,8 +308,8 @@ func buildTags(tags map[string]string) string {
 }
 func sanitize(value string) string {
-// Apply special hypenation rules to preserve backwards compatibility
-value = hypenChars.Replace(value)
+// Apply special hyphenation rules to preserve backwards compatibility
+value = hyphenChars.Replace(value)
 // Apply rule to drop some chars to preserve backwards compatibility
 value = dropChars.Replace(value)
 // Replace any remaining illegal chars


@@ -892,7 +892,7 @@ if __name__ == '__main__':
 help='Send build stats to InfluxDB using provided database name')
 parser.add_argument('--nightly',
 action='store_true',
-help='Mark build output as nightly build (will incremement the minor version)')
+help='Mark build output as nightly build (will increment the minor version)')
 parser.add_argument('--update',
 action='store_true',
 help='Update build dependencies prior to building')


@@ -17,7 +17,7 @@ import (
 )
 var (
-registry *rgstry
+registry *Registry
 )
 // Stat is an interface for dealing with telegraf statistics collected
@@ -109,12 +109,12 @@ func Metrics() []telegraf.Metric {
 return metrics
 }
-type rgstry struct {
+type Registry struct {
 stats map[uint64]map[string]Stat
 mu sync.Mutex
 }
-func (r *rgstry) register(measurement, field string, tags map[string]string) Stat {
+func (r *Registry) register(measurement, field string, tags map[string]string) Stat {
 r.mu.Lock()
 defer r.mu.Unlock()
@@ -137,7 +137,7 @@ func (r *rgstry) register(measurement, field string, tags map[string]string) Sta
 return s
 }
-func (r *rgstry) registerTiming(measurement, field string, tags map[string]string) Stat {
+func (r *Registry) registerTiming(measurement, field string, tags map[string]string) Stat {
 r.mu.Lock()
 defer r.mu.Unlock()
@@ -160,7 +160,7 @@ func (r *rgstry) registerTiming(measurement, field string, tags map[string]strin
 return s
 }
-func (r *rgstry) get(key uint64, field string) (Stat, bool) {
+func (r *Registry) get(key uint64, field string) (Stat, bool) {
 if _, ok := r.stats[key]; !ok {
 return nil, false
 }
@@ -172,7 +172,7 @@ func (r *rgstry) get(key uint64, field string) (Stat, bool) {
 return nil, false
 }
-func (r *rgstry) set(key uint64, s Stat) {
+func (r *Registry) set(key uint64, s Stat) {
 if _, ok := r.stats[key]; !ok {
 r.stats[key] = make(map[string]Stat)
 }
@@ -201,7 +201,7 @@ func key(measurement string, tags map[string]string) uint64 {
 }
 func init() {
-registry = &rgstry{
+registry = &Registry{
 stats: make(map[uint64]map[string]Stat),
 }
 }


@@ -18,7 +18,7 @@ var (
 // testCleanup resets the global registry for test cleanup & unlocks the test lock
 func testCleanup() {
-registry = &rgstry{
+registry = &Registry{
 stats: make(map[uint64]map[string]Stat),
 }
 testLock.Unlock()