Fix assorted spelling mistakes (#7507)
parent f74824eecb
commit bf1eb291f2
@@ -130,7 +130,7 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) {
 
 if tlsConfig.MinVersion != 0 && tlsConfig.MaxVersion != 0 && tlsConfig.MinVersion > tlsConfig.MaxVersion {
 return nil, fmt.Errorf(
-"tls min version %q can't be greater then tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion)
+"tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion)
 }
 
 return tlsConfig, nil
@@ -225,7 +225,7 @@ func TestServerConfig(t *testing.T) {
 expErr: true,
 },
 {
-name: "TLS Max Version less then TLS Min version",
+name: "TLS Max Version less than TLS Min version",
 server: tls.ServerConfig{
 TLSCert: pki.ServerCertPath(),
 TLSKey: pki.ServerKeyPath(),
@@ -334,7 +334,7 @@ func TestValueType(t *testing.T) {
 assert.Equal(t, telegraf.Gauge, m.Type())
 }
 
-func TestCopyAggreate(t *testing.T) {
+func TestCopyAggregate(t *testing.T) {
 m1 := baseMetric()
 m1.SetAggregate(true)
 m2 := m1.Copy()
@@ -97,7 +97,7 @@ func TestFilter_Empty(t *testing.T) {
 "foo_bar",
 "foo.bar",
 "foo-bar",
-"supercalifradjulisticexpialidocious",
+"supercalifragilisticexpialidocious",
 }
 
 for _, measurement := range measurements {
@@ -295,7 +295,7 @@ func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) {
 if c.Metrics != nil {
 for _, m := range c.Metrics {
 metrics := []*cloudwatch.Metric{}
-if !hasWilcard(m.Dimensions) {
+if !hasWildcard(m.Dimensions) {
 dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions))
 for k, d := range m.Dimensions {
 dimensions[k] = &cloudwatch.Dimension{
@@ -603,7 +603,7 @@ func (f *metricCache) isValid() bool {
 return f.metrics != nil && time.Since(f.built) < f.ttl
 }
 
-func hasWilcard(dimensions []*Dimension) bool {
+func hasWildcard(dimensions []*Dimension) bool {
 for _, d := range dimensions {
 if d.Value == "" || d.Value == "*" {
 return true
@@ -292,7 +292,7 @@ func (e *EventHub) onDelivery(
 delete(groups, track.ID())
 if !ok {
 // The metrics should always be found, this message indicates a programming error.
-e.Log.Errorf("Could not find delievery: %d", track.ID())
+e.Log.Errorf("Could not find delivery: %d", track.ID())
 return true
 }
@@ -106,7 +106,7 @@ func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
 if resp.StatusCode == http.StatusForbidden {
 return fmt.Errorf("fireboard server responded with %d [Forbidden], verify your authToken", resp.StatusCode)
 }
-return fmt.Errorf("fireboard responded with unexepcted status code %d", resp.StatusCode)
+return fmt.Errorf("fireboard responded with unexpected status code %d", resp.StatusCode)
 }
 // Decode the response JSON into a new stats struct
 var stats []fireboardStats
@@ -24,7 +24,7 @@ type GitHub struct {
 HTTPTimeout internal.Duration `toml:"http_timeout"`
 githubClient *github.Client
 
-obfusticatedToken string
+obfuscatedToken string
 
 RateLimit selfstat.Stat
 RateLimitErrors selfstat.Stat
@@ -67,7 +67,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error)
 Timeout: g.HTTPTimeout.Duration,
 }
 
-g.obfusticatedToken = "Unauthenticated"
+g.obfuscatedToken = "Unauthenticated"
 
 if g.AccessToken != "" {
 tokenSource := oauth2.StaticTokenSource(
@@ -76,7 +76,7 @@ func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error)
 oauthClient := oauth2.NewClient(ctx, tokenSource)
 ctx = context.WithValue(ctx, oauth2.HTTPClient, oauthClient)
 
-g.obfusticatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]
+g.obfuscatedToken = g.AccessToken[0:4] + "..." + g.AccessToken[len(g.AccessToken)-3:]
 
 return g.newGithubClient(oauthClient)
 }
@@ -105,7 +105,7 @@ func (g *GitHub) Gather(acc telegraf.Accumulator) error {
 g.githubClient = githubClient
 
 tokenTags := map[string]string{
-"access_token": g.obfusticatedToken,
+"access_token": g.obfuscatedToken,
 }
 
 g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags)
@@ -221,7 +221,7 @@ func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error {
 })
 if err != nil {
 k.cancel()
-k.Log.Errorf("Scan encounterred an error: %s", err.Error())
+k.Log.Errorf("Scan encountered an error: %s", err.Error())
 k.cons = nil
 }
 }()
@@ -178,7 +178,7 @@ type Report struct {
 Unsubscribed int `json:"unsubscribed"`
 SendTime string `json:"send_time"`
 
-TimeSeries []TimeSerie
+TimeSeries []TimeSeries
 Bounces Bounces `json:"bounces"`
 Forwards Forwards `json:"forwards"`
 Opens Opens `json:"opens"`
@@ -237,7 +237,7 @@ type ListStats struct {
 ClickRate float64 `json:"click_rate"`
 }
 
-type TimeSerie struct {
+type TimeSeries struct {
 TimeStamp string `json:"timestamp"`
 EmailsSent int `json:"emails_sent"`
 UniqueOpens int `json:"unique_opens"`
@@ -140,7 +140,7 @@ func TestMailChimpGatherReport(t *testing.T) {
 
 }
 
-func TestMailChimpGatherErroror(t *testing.T) {
+func TestMailChimpGatherError(t *testing.T) {
 ts := httptest.NewServer(
 http.HandlerFunc(
 func(w http.ResponseWriter, r *http.Request) {
@@ -84,7 +84,7 @@ type MlHost struct {
 
 // Description of plugin returned
 func (c *Marklogic) Description() string {
-return "Retrives information on a specific host in a MarkLogic Cluster"
+return "Retrieves information on a specific host in a MarkLogic Cluster"
 }
 
 var sampleConfig = `
@@ -32,8 +32,8 @@ const (
 
 // Rcon package errors.
 var (
-ErrInvalidWrite = errors.New("Failed to write the payload corretly to remote connection.")
-ErrInvalidRead = errors.New("Failed to read the response corretly from remote connection.")
+ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.")
+ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.")
 ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.")
 ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.")
 ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.")
@@ -590,7 +590,7 @@ func TestConnection(t *testing.T) {
 }
 }
 
-func TestInvalidUsernameorPassword(t *testing.T) {
+func TestInvalidUsernameOrPassword(t *testing.T) {
 
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -624,7 +624,7 @@ func TestInvalidUsernameorPassword(t *testing.T) {
 assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200")
 }
 
-func TestNoUsernameorPasswordConfiguration(t *testing.T) {
+func TestNoUsernameOrPasswordConfiguration(t *testing.T) {
 
 ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -341,7 +341,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) {
 }
 
 if len(m.Servers) == 0 {
-return opts, fmt.Errorf("could not get host infomations")
+return opts, fmt.Errorf("could not get host informations")
 }
 
 for _, server := range m.Servers {
@@ -715,7 +715,7 @@ func TestGatherHttpLocationZonesMetrics(t *testing.T) {
 })
 }
 
-func TestHatherHttpUpstreamsMetrics(t *testing.T) {
+func TestGatherHttpUpstreamsMetrics(t *testing.T) {
 ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload)
 defer ts.Close()
@@ -81,7 +81,7 @@ func TestParseSimpleOutputwithStatePrefix(t *testing.T) {
 acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags)
 }
 
-func TestParseSimpleOutputInavlidPeer(t *testing.T) {
+func TestParseSimpleOutputInvalidPeer(t *testing.T) {
 acc := &testutil.Accumulator{}
 v := &Openntpd{
 run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false),
@@ -79,7 +79,7 @@ func (pa *PuppetAgent) SampleConfig() string {
 
 // Description returns description of PuppetAgent plugin
 func (pa *PuppetAgent) Description() string {
-return `Reads last_run_summary.yaml file and converts to measurments`
+return `Reads last_run_summary.yaml file and converts to measurements`
 }
 
 // Gather reads stats from all configured servers accumulates stats
@@ -127,7 +127,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error {
 
 // Successful responses will always return status code 200
 if resp.StatusCode != http.StatusOK {
-return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode)
+return fmt.Errorf("riak responded with unexpected status code %d", resp.StatusCode)
 }
 
 // Decode the response JSON into a new stats struct
@@ -105,7 +105,7 @@ var (
 "Available Spare": {
 Name: "Available_Spare",
 Parse: func(fields, deviceFields map[string]interface{}, str string) error {
-return parseCommaSeperatedInt(fields, deviceFields, strings.TrimSuffix(str, "%"))
+return parseCommaSeparatedInt(fields, deviceFields, strings.TrimSuffix(str, "%"))
 },
 },
 }
@@ -360,7 +360,7 @@ func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, co
 tags["id"] = attr.ID
 }
 
-parse := parseCommaSeperatedInt
+parse := parseCommaSeparatedInt
 if attr.Parse != nil {
 parse = attr.Parse
 }
@@ -421,7 +421,7 @@ func parseInt(str string) int64 {
 return 0
 }
 
-func parseCommaSeperatedInt(fields, _ map[string]interface{}, str string) error {
+func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error {
 i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64)
 if err != nil {
 return err
@@ -714,7 +714,7 @@ Transport protocol: SAS (SPL-3)
 Local Time is: Wed Apr 17 15:01:28 2019 PDT
 SMART support is: Available - device has SMART capability.
 SMART support is: Enabled
-Temp$rature Warning: Disabled or Not Supported
+Temperature Warning: Disabled or Not Supported
 
 === START OF READ SMART DATA SECTION ===
 SMART Health Status: OK
@@ -226,7 +226,7 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo
 func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 var coreMetrics map[string]Core
 if len(mBeansData.SolrMbeans) < 2 {
-return fmt.Errorf("no core metric data to unmarshall")
+return fmt.Errorf("no core metric data to unmarshal")
 }
 if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil {
 return err
@@ -257,7 +257,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa
 var queryMetrics map[string]QueryHandler
 
 if len(mBeansData.SolrMbeans) < 4 {
-return fmt.Errorf("no query handler metric data to unmarshall")
+return fmt.Errorf("no query handler metric data to unmarshal")
 }
 
 if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil {
@@ -332,7 +332,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD
 var updateMetrics map[string]UpdateHandler
 
 if len(mBeansData.SolrMbeans) < 6 {
-return fmt.Errorf("no update handler metric data to unmarshall")
+return fmt.Errorf("no update handler metric data to unmarshal")
 }
 if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil {
 return err
@@ -410,7 +410,7 @@ func getInt(unk interface{}) int64 {
 // Add cache metrics section to accumulator
 func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error {
 if len(mBeansData.SolrMbeans) < 8 {
-return fmt.Errorf("no cache metric data to unmarshall")
+return fmt.Errorf("no cache metric data to unmarshal")
 }
 var cacheMetrics map[string]Cache
 if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil {
@@ -218,7 +218,7 @@ func (c *stackdriverMetricClient) ListMetricDescriptors(
 mdDesc, mdErr := mdResp.Next()
 if mdErr != nil {
 if mdErr != iterator.Done {
-c.log.Errorf("Failed iterating metric desciptor responses: %q: %v", req.String(), mdErr)
+c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr)
 }
 break
 }
@@ -101,7 +101,7 @@ func (n *Tengine) createHttpClient() (*http.Client, error) {
 return client, nil
 }
 
-type TengineSatus struct {
+type TengineStatus struct {
 host string
 bytes_in uint64
 bytes_out uint64
@@ -135,7 +135,7 @@ type TengineSatus struct {
 }
 
 func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error {
-var tenginestatus TengineSatus
+var tenginestatus TengineStatus
 resp, err := n.client.Get(addr.String())
 if err != nil {
 return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err)
@@ -192,7 +192,7 @@ MAIN.s_req 0 0.00 Total requests seen
 MAIN.s_pipe 0 0.00 Total pipe sessions seen
 MAIN.s_pass 0 0.00 Total pass-ed requests seen
 MAIN.s_fetch 0 0.00 Total backend fetches initiated
-MAIN.s_synth 0 0.00 Total synthethic responses made
+MAIN.s_synth 0 0.00 Total synthetic responses made
 MAIN.s_req_hdrbytes 0 0.00 Request header bytes
 MAIN.s_req_bodybytes 0 0.00 Request body bytes
 MAIN.s_resp_hdrbytes 0 0.00 Response header bytes
@@ -135,7 +135,7 @@ func defaultVSphere() *VSphere {
 VMInclude: []string{"/**"},
 DatastoreMetricInclude: []string{
 "disk.used.*",
-"disk.provsioned.*"},
+"disk.provisioned.*"},
 DatastoreMetricExclude: nil,
 DatastoreInclude: []string{"/**"},
 DatacenterMetricInclude: nil,
@@ -74,7 +74,7 @@ func (m *PerformanceQueryImpl) Open() error {
 // Close closes the counterPath, releases associated counter handles and frees resources
 func (m *PerformanceQueryImpl) Close() error {
 if m.query == 0 {
-return errors.New("uninitialised query")
+return errors.New("uninitialized query")
 }
 
 if ret := PdhCloseQuery(m.query); ret != ERROR_SUCCESS {
@@ -87,7 +87,7 @@ func (m *PerformanceQueryImpl) Close() error {
 func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 var counterHandle PDH_HCOUNTER
 if m.query == 0 {
-return 0, errors.New("uninitialised query")
+return 0, errors.New("uninitialized query")
 }
 
 if ret := PdhAddCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS {
@@ -99,7 +99,7 @@ func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNT
 func (m *PerformanceQueryImpl) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 var counterHandle PDH_HCOUNTER
 if m.query == 0 {
-return 0, errors.New("uninitialised query")
+return 0, errors.New("uninitialized query")
 }
 if ret := PdhAddEnglishCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS {
 return 0, NewPdhError(ret)
@@ -184,7 +184,7 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN
 func (m *PerformanceQueryImpl) CollectData() error {
 var ret uint32
 if m.query == 0 {
-return errors.New("uninitialised query")
+return errors.New("uninitialized query")
 }
 
 if ret = PdhCollectQueryData(m.query); ret != ERROR_SUCCESS {
@@ -195,7 +195,7 @@ func (m *PerformanceQueryImpl) CollectData() error {
 
 func (m *PerformanceQueryImpl) CollectDataWithTime() (time.Time, error) {
 if m.query == 0 {
-return time.Now(), errors.New("uninitialised query")
+return time.Now(), errors.New("uninitialized query")
 }
 ret, mtime := PdhCollectQueryDataWithTime(m.query)
 if ret != ERROR_SUCCESS {
@@ -27,15 +27,15 @@ func TestWinPerformanceQueryImpl(t *testing.T) {
 
 _, err = query.AddCounterToQuery("")
 require.Error(t, err, "uninitialized query must return errors")
-assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+assert.True(t, strings.Contains(err.Error(), "uninitialized"))
 
 _, err = query.AddEnglishCounterToQuery("")
 require.Error(t, err, "uninitialized query must return errors")
-assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+assert.True(t, strings.Contains(err.Error(), "uninitialized"))
 
 err = query.CollectData()
 require.Error(t, err, "uninitialized query must return errors")
-assert.True(t, strings.Contains(err.Error(), "uninitialised"))
+assert.True(t, strings.Contains(err.Error(), "uninitialized"))
 
 err = query.Open()
 require.NoError(t, err)
@@ -50,7 +50,7 @@ func (m *FakePerformanceQuery) Open() error {
 
 func (m *FakePerformanceQuery) Close() error {
 if !m.openCalled {
-return errors.New("CloSe: uninitialised query")
+return errors.New("CloSe: uninitialized query")
 }
 m.openCalled = false
 return nil
@@ -58,7 +58,7 @@ func (m *FakePerformanceQuery) Close() error {
 
 func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 if !m.openCalled {
-return 0, errors.New("AddCounterToQuery: uninitialised query")
+return 0, errors.New("AddCounterToQuery: uninitialized query")
 }
 if c, ok := m.counters[counterPath]; ok {
 return c.handle, nil
@@ -69,7 +69,7 @@ func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNT
 
 func (m *FakePerformanceQuery) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) {
 if !m.openCalled {
-return 0, errors.New("AddEnglishCounterToQuery: uninitialised query")
+return 0, errors.New("AddEnglishCounterToQuery: uninitialized query")
 }
 if c, ok := m.counters[counterPath]; ok {
 return c.handle, nil
@@ -97,7 +97,7 @@ func (m *FakePerformanceQuery) ExpandWildCardPath(counterPath string) ([]string,
 
 func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_HCOUNTER) (float64, error) {
 if !m.openCalled {
-return 0, errors.New("GetFormattedCounterValueDouble: uninitialised query")
+return 0, errors.New("GetFormattedCounterValueDouble: uninitialized query")
 }
 for _, counter := range m.counters {
 if counter.handle == counterHandle {
@@ -129,7 +129,7 @@ func (m *FakePerformanceQuery) findCounterByHandle(counterHandle PDH_HCOUNTER) *
 
 func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) {
 if !m.openCalled {
-return nil, errors.New("GetFormattedCounterArrayDouble: uninitialised query")
+return nil, errors.New("GetFormattedCounterArrayDouble: uninitialized query")
 }
 for _, c := range m.counters {
 if c.handle == hCounter {
@@ -157,14 +157,14 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN
 
 func (m *FakePerformanceQuery) CollectData() error {
 if !m.openCalled {
-return errors.New("CollectData: uninitialised query")
+return errors.New("CollectData: uninitialized query")
 }
 return nil
 }
 
 func (m *FakePerformanceQuery) CollectDataWithTime() (time.Time, error) {
 if !m.openCalled {
-return time.Now(), errors.New("CollectData: uninitialised query")
+return time.Now(), errors.New("CollectData: uninitialized query")
 }
 return MetricTime, nil
 }
@@ -127,7 +127,7 @@ func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certifica
 }
 return certs, nil
 default:
-return nil, fmt.Errorf("unsuported scheme '%s' in location %s", u.Scheme, u.String())
+return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String())
 }
 }
@@ -55,7 +55,7 @@ func main() {
 zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second))
 defer collector.Close()
 if err != nil {
-log.Fatalf("Error intializing zipkin http collector: %v\n", err)
+log.Fatalf("Error initializing zipkin http collector: %v\n", err)
 }
 
 tracer, err := zipkin.NewTracer(
@@ -117,7 +117,7 @@ func (k *KinesisOutput) Description() string {
 
 func (k *KinesisOutput) Connect() error {
 if k.Partition == nil {
-log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition")
+log.Print("E! kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition")
 }
 
 // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using
@@ -223,7 +223,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) {
 }
 
 if len(m.Servers) == 0 {
-return opts, fmt.Errorf("could not get host infomations")
+return opts, fmt.Errorf("could not get host informations")
 }
 for _, host := range m.Servers {
 server := fmt.Sprintf("%s://%s", scheme, host)
@@ -16,14 +16,14 @@ import (
 
 var (
 allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
-hypenChars = strings.NewReplacer(
+hyphenChars = strings.NewReplacer(
 "@", "-",
 "*", "-",
 `%`, "-",
 "#", "-",
 "$", "-")
 defaultHttpPath = "/api/put"
-defaultSeperator = "_"
+defaultSeparator = "_"
 )
 
 type OpenTSDB struct {
@@ -261,8 +261,8 @@ func (o *OpenTSDB) Close() error {
 }
 
 func sanitize(value string) string {
-// Apply special hypenation rules to preserve backwards compatibility
-value = hypenChars.Replace(value)
+// Apply special hyphenation rules to preserve backwards compatibility
+value = hyphenChars.Replace(value)
 // Replace any remaining illegal chars
 return allowedChars.ReplaceAllLiteralString(value, "_")
 }
@@ -271,7 +271,7 @@ func init() {
 outputs.Add("opentsdb", func() telegraf.Output {
 return &OpenTSDB{
 HttpPath: defaultHttpPath,
-Separator: defaultSeperator,
+Separator: defaultSeparator,
 }
 })
 }
@@ -243,7 +243,7 @@ func TestTrimSpace(t *testing.T) {
 require.Equal(t, expectedFields, metrics[0].Fields())
 }
 
-func TestTrimSpaceDelimetedBySpace(t *testing.T) {
+func TestTrimSpaceDelimitedBySpace(t *testing.T) {
 p := Parser{
 Delimiter: " ",
 HeaderRowCount: 1,
@@ -28,7 +28,7 @@ type WhiteSpaceParser struct {
 type TagParser struct{}
 type LoopedParser struct {
 wrappedParser ElementParser
-wsPaser *WhiteSpaceParser
+wsParser *WhiteSpaceParser
 }
 type LiteralParser struct {
 literal string
@@ -136,7 +136,7 @@ func (ep *LoopedParser) parse(p *PointParser, pt *Point) error {
 if err != nil {
 return err
 }
-err = ep.wsPaser.parse(p, pt)
+err = ep.wsParser.parse(p, pt)
 if err == ErrEOF {
 break
 }
@@ -47,7 +47,7 @@ func NewWavefrontElements() []ElementParser {
 var elements []ElementParser
 wsParser := WhiteSpaceParser{}
 wsParserNextOpt := WhiteSpaceParser{nextOptional: true}
-repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsPaser: &wsParser}
+repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsParser: &wsParser}
 elements = append(elements, &NameParser{}, &wsParser, &ValueParser{}, &wsParserNextOpt,
 &TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser)
 return elements
@@ -17,7 +17,7 @@ const DEFAULT_TEMPLATE = "host.tags.measurement.field"
 
 var (
 allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`)
-hypenChars = strings.NewReplacer(
+hyphenChars = strings.NewReplacer(
 "/", "-",
 "@", "-",
 "*", "-",
@@ -308,8 +308,8 @@ func buildTags(tags map[string]string) string {
 }
 
 func sanitize(value string) string {
-// Apply special hypenation rules to preserve backwards compatibility
-value = hypenChars.Replace(value)
+// Apply special hyphenation rules to preserve backwards compatibility
+value = hyphenChars.Replace(value)
 // Apply rule to drop some chars to preserve backwards compatibility
 value = dropChars.Replace(value)
 // Replace any remaining illegal chars
@@ -892,7 +892,7 @@ if __name__ == '__main__':
 help='Send build stats to InfluxDB using provided database name')
 parser.add_argument('--nightly',
 action='store_true',
-help='Mark build output as nightly build (will incremement the minor version)')
+help='Mark build output as nightly build (will increment the minor version)')
 parser.add_argument('--update',
 action='store_true',
 help='Update build dependencies prior to building')
@@ -17,7 +17,7 @@ import (
 )
 
 var (
-registry *rgstry
+registry *Registry
 )
 
 // Stat is an interface for dealing with telegraf statistics collected
@@ -109,12 +109,12 @@ func Metrics() []telegraf.Metric {
 return metrics
 }
 
-type rgstry struct {
+type Registry struct {
 stats map[uint64]map[string]Stat
 mu sync.Mutex
 }
 
-func (r *rgstry) register(measurement, field string, tags map[string]string) Stat {
+func (r *Registry) register(measurement, field string, tags map[string]string) Stat {
 r.mu.Lock()
 defer r.mu.Unlock()
@@ -137,7 +137,7 @@ func (r *rgstry) register(measurement, field string, tags map[string]string) Sta
 return s
 }
 
-func (r *rgstry) registerTiming(measurement, field string, tags map[string]string) Stat {
+func (r *Registry) registerTiming(measurement, field string, tags map[string]string) Stat {
 r.mu.Lock()
 defer r.mu.Unlock()
@@ -160,7 +160,7 @@ func (r *rgstry) registerTiming(measurement, field string, tags map[string]strin
 return s
 }
 
-func (r *rgstry) get(key uint64, field string) (Stat, bool) {
+func (r *Registry) get(key uint64, field string) (Stat, bool) {
 if _, ok := r.stats[key]; !ok {
 return nil, false
 }
@@ -172,7 +172,7 @@ func (r *rgstry) get(key uint64, field string) (Stat, bool) {
 return nil, false
 }
 
-func (r *rgstry) set(key uint64, s Stat) {
+func (r *Registry) set(key uint64, s Stat) {
 if _, ok := r.stats[key]; !ok {
 r.stats[key] = make(map[string]Stat)
 }
@@ -201,7 +201,7 @@ func key(measurement string, tags map[string]string) uint64 {
 }
 
 func init() {
-registry = &rgstry{
+registry = &Registry{
 stats: make(map[uint64]map[string]Stat),
 }
 }
@@ -18,7 +18,7 @@ var (
 
 // testCleanup resets the global registry for test cleanup & unlocks the test lock
 func testCleanup() {
-registry = &rgstry{
+registry = &Registry{
 stats: make(map[uint64]map[string]Stat),
 }
 testLock.Unlock()