Throughout telegraf, use telegraf.Metric rather than client.Point
closes #599
Committed by Ryan Merrick
parent 5364a20825
commit 1edfa9bbd0
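The hunks below mechanically swap client.NewPoint(...) for telegraf.NewMetric(...) and then read the result back through accessor methods. Neither the telegraf.Metric interface nor its constructor is defined in this diff; what follows is only a minimal sketch of the calling pattern, with the accessor names (Name, Tags, Fields, Time) and the (Metric, error) return shape inferred from calls that appear in the hunks, and the function name buildExampleMetric invented for illustration:

package sketch

import (
    "time"

    "github.com/influxdata/telegraf"
)

// buildExampleMetric shows the pattern the "+" lines below adopt:
// telegraf.NewMetric(measurement, tags, fields, timestamp) replaces
// client.NewPoint, and consumers read the metric back through the
// same accessors this diff uses (Name, Tags, Fields, Time).
func buildExampleMetric(tags map[string]string, fields map[string]interface{}) (telegraf.Metric, error) {
    m, err := telegraf.NewMetric("github_webhooks", tags, fields, time.Now())
    if err != nil {
        return nil, err
    }
    _ = m.Name()   // "github_webhooks"
    _ = m.Tags()   // tags passed above
    _ = m.Fields() // fields passed above
    _ = m.Time()   // timestamp passed above
    return m, nil
}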
@@ -45,7 +45,7 @@ func (gh *GithubWebhooks) Gather(acc telegraf.Accumulator) error {
    gh.Lock()
    defer gh.Unlock()
    for _, event := range gh.events {
-        p := event.NewPoint()
+        p := event.NewMetric()
        acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
    }
    gh.events = make([]Event, 0)
@@ -5,13 +5,13 @@ import (
    "log"
    "time"

-    "github.com/influxdata/influxdb/client/v2"
+    "github.com/influxdata/telegraf"
)

const meas = "github_webhooks"

type Event interface {
-    NewPoint() *client.Point
+    NewMetric() telegraf.Metric
}

type Repository struct {
@@ -90,7 +90,7 @@ type CommitCommentEvent struct {
    Sender Sender `json:"sender"`
}

-func (s CommitCommentEvent) NewPoint() *client.Point {
+func (s CommitCommentEvent) NewMetric() telegraf.Metric {
    event := "commit_comment"
    t := map[string]string{
        "event": event,
@@ -106,11 +106,11 @@ func (s CommitCommentEvent) NewPoint() *client.Point {
        "commit": s.Comment.Commit,
        "comment": s.Comment.Body,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type CreateEvent struct {
@@ -120,7 +120,7 @@ type CreateEvent struct {
    Sender Sender `json:"sender"`
}

-func (s CreateEvent) NewPoint() *client.Point {
+func (s CreateEvent) NewMetric() telegraf.Metric {
    event := "create"
    t := map[string]string{
        "event": event,
@@ -136,11 +136,11 @@ func (s CreateEvent) NewPoint() *client.Point {
        "ref": s.Ref,
        "refType": s.RefType,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type DeleteEvent struct {
@@ -150,7 +150,7 @@ type DeleteEvent struct {
    Sender Sender `json:"sender"`
}

-func (s DeleteEvent) NewPoint() *client.Point {
+func (s DeleteEvent) NewMetric() telegraf.Metric {
    event := "delete"
    t := map[string]string{
        "event": event,
@@ -166,11 +166,11 @@ func (s DeleteEvent) NewPoint() *client.Point {
        "ref": s.Ref,
        "refType": s.RefType,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type DeploymentEvent struct {
@@ -179,7 +179,7 @@ type DeploymentEvent struct {
    Sender Sender `json:"sender"`
}

-func (s DeploymentEvent) NewPoint() *client.Point {
+func (s DeploymentEvent) NewMetric() telegraf.Metric {
    event := "deployment"
    t := map[string]string{
        "event": event,
@@ -197,11 +197,11 @@ func (s DeploymentEvent) NewPoint() *client.Point {
        "environment": s.Deployment.Environment,
        "description": s.Deployment.Description,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type DeploymentStatusEvent struct {
@@ -211,7 +211,7 @@ type DeploymentStatusEvent struct {
    Sender Sender `json:"sender"`
}

-func (s DeploymentStatusEvent) NewPoint() *client.Point {
+func (s DeploymentStatusEvent) NewMetric() telegraf.Metric {
    event := "delete"
    t := map[string]string{
        "event": event,
@@ -231,11 +231,11 @@ func (s DeploymentStatusEvent) NewPoint() *client.Point {
        "depState": s.DeploymentStatus.State,
        "depDescription": s.DeploymentStatus.Description,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type ForkEvent struct {
@@ -244,7 +244,7 @@ type ForkEvent struct {
    Sender Sender `json:"sender"`
}

-func (s ForkEvent) NewPoint() *client.Point {
+func (s ForkEvent) NewMetric() telegraf.Metric {
    event := "fork"
    t := map[string]string{
        "event": event,
@@ -259,11 +259,11 @@ func (s ForkEvent) NewPoint() *client.Point {
        "issues": s.Repository.Issues,
        "fork": s.Forkee.Repository,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type GollumEvent struct {
@@ -273,7 +273,7 @@ type GollumEvent struct {
}

// REVIEW: Going to be lazy and not deal with the pages.
-func (s GollumEvent) NewPoint() *client.Point {
+func (s GollumEvent) NewMetric() telegraf.Metric {
    event := "gollum"
    t := map[string]string{
        "event": event,
@@ -287,11 +287,11 @@ func (s GollumEvent) NewPoint() *client.Point {
        "forks": s.Repository.Forks,
        "issues": s.Repository.Issues,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type IssueCommentEvent struct {
@@ -301,7 +301,7 @@ type IssueCommentEvent struct {
    Sender Sender `json:"sender"`
}

-func (s IssueCommentEvent) NewPoint() *client.Point {
+func (s IssueCommentEvent) NewMetric() telegraf.Metric {
    event := "issue_comment"
    t := map[string]string{
        "event": event,
@@ -319,11 +319,11 @@ func (s IssueCommentEvent) NewPoint() *client.Point {
        "comments": s.Issue.Comments,
        "body": s.Comment.Body,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type IssuesEvent struct {
@@ -333,7 +333,7 @@ type IssuesEvent struct {
    Sender Sender `json:"sender"`
}

-func (s IssuesEvent) NewPoint() *client.Point {
+func (s IssuesEvent) NewMetric() telegraf.Metric {
    event := "issue"
    t := map[string]string{
        "event": event,
@@ -351,11 +351,11 @@ func (s IssuesEvent) NewPoint() *client.Point {
        "title": s.Issue.Title,
        "comments": s.Issue.Comments,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type MemberEvent struct {
@@ -364,7 +364,7 @@ type MemberEvent struct {
    Sender Sender `json:"sender"`
}

-func (s MemberEvent) NewPoint() *client.Point {
+func (s MemberEvent) NewMetric() telegraf.Metric {
    event := "member"
    t := map[string]string{
        "event": event,
@@ -380,11 +380,11 @@ func (s MemberEvent) NewPoint() *client.Point {
        "newMember": s.Member.User,
        "newMemberStatus": s.Member.Admin,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type MembershipEvent struct {
@@ -394,7 +394,7 @@ type MembershipEvent struct {
    Team Team `json:"team"`
}

-func (s MembershipEvent) NewPoint() *client.Point {
+func (s MembershipEvent) NewMetric() telegraf.Metric {
    event := "membership"
    t := map[string]string{
        "event": event,
@@ -406,11 +406,11 @@ func (s MembershipEvent) NewPoint() *client.Point {
        "newMember": s.Member.User,
        "newMemberStatus": s.Member.Admin,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type PageBuildEvent struct {
@@ -418,7 +418,7 @@ type PageBuildEvent struct {
    Sender Sender `json:"sender"`
}

-func (s PageBuildEvent) NewPoint() *client.Point {
+func (s PageBuildEvent) NewMetric() telegraf.Metric {
    event := "page_build"
    t := map[string]string{
        "event": event,
@@ -432,11 +432,11 @@ func (s PageBuildEvent) NewPoint() *client.Point {
        "forks": s.Repository.Forks,
        "issues": s.Repository.Issues,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type PublicEvent struct {
@@ -444,7 +444,7 @@ type PublicEvent struct {
    Sender Sender `json:"sender"`
}

-func (s PublicEvent) NewPoint() *client.Point {
+func (s PublicEvent) NewMetric() telegraf.Metric {
    event := "public"
    t := map[string]string{
        "event": event,
@@ -458,11 +458,11 @@ func (s PublicEvent) NewPoint() *client.Point {
        "forks": s.Repository.Forks,
        "issues": s.Repository.Issues,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type PullRequestEvent struct {
@@ -472,7 +472,7 @@ type PullRequestEvent struct {
    Sender Sender `json:"sender"`
}

-func (s PullRequestEvent) NewPoint() *client.Point {
+func (s PullRequestEvent) NewMetric() telegraf.Metric {
    event := "pull_request"
    t := map[string]string{
        "event": event,
@@ -495,11 +495,11 @@ func (s PullRequestEvent) NewPoint() *client.Point {
        "deletions": s.PullRequest.Deletions,
        "changedFiles": s.PullRequest.ChangedFiles,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type PullRequestReviewCommentEvent struct {
@@ -509,7 +509,7 @@ type PullRequestReviewCommentEvent struct {
    Sender Sender `json:"sender"`
}

-func (s PullRequestReviewCommentEvent) NewPoint() *client.Point {
+func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric {
    event := "pull_request_review_comment"
    t := map[string]string{
        "event": event,
@@ -533,11 +533,11 @@ func (s PullRequestReviewCommentEvent) NewPoint() *client.Point {
        "commentFile": s.Comment.File,
        "comment": s.Comment.Comment,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type PushEvent struct {
@@ -548,7 +548,7 @@ type PushEvent struct {
    Sender Sender `json:"sender"`
}

-func (s PushEvent) NewPoint() *client.Point {
+func (s PushEvent) NewMetric() telegraf.Metric {
    event := "push"
    t := map[string]string{
        "event": event,
@@ -565,11 +565,11 @@ func (s PushEvent) NewPoint() *client.Point {
        "before": s.Before,
        "after": s.After,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type ReleaseEvent struct {
@@ -578,7 +578,7 @@ type ReleaseEvent struct {
    Sender Sender `json:"sender"`
}

-func (s ReleaseEvent) NewPoint() *client.Point {
+func (s ReleaseEvent) NewMetric() telegraf.Metric {
    event := "release"
    t := map[string]string{
        "event": event,
@@ -593,11 +593,11 @@ func (s ReleaseEvent) NewPoint() *client.Point {
        "issues": s.Repository.Issues,
        "tagName": s.Release.TagName,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type RepositoryEvent struct {
@@ -605,7 +605,7 @@ type RepositoryEvent struct {
    Sender Sender `json:"sender"`
}

-func (s RepositoryEvent) NewPoint() *client.Point {
+func (s RepositoryEvent) NewMetric() telegraf.Metric {
    event := "repository"
    t := map[string]string{
        "event": event,
@@ -619,11 +619,11 @@ func (s RepositoryEvent) NewPoint() *client.Point {
        "forks": s.Repository.Forks,
        "issues": s.Repository.Issues,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type StatusEvent struct {
@@ -633,7 +633,7 @@ type StatusEvent struct {
    Sender Sender `json:"sender"`
}

-func (s StatusEvent) NewPoint() *client.Point {
+func (s StatusEvent) NewMetric() telegraf.Metric {
    event := "status"
    t := map[string]string{
        "event": event,
@@ -649,11 +649,11 @@ func (s StatusEvent) NewPoint() *client.Point {
        "commit": s.Commit,
        "state": s.State,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type TeamAddEvent struct {
@@ -662,7 +662,7 @@ type TeamAddEvent struct {
    Sender Sender `json:"sender"`
}

-func (s TeamAddEvent) NewPoint() *client.Point {
+func (s TeamAddEvent) NewMetric() telegraf.Metric {
    event := "team_add"
    t := map[string]string{
        "event": event,
@@ -677,11 +677,11 @@ func (s TeamAddEvent) NewPoint() *client.Point {
        "issues": s.Repository.Issues,
        "teamName": s.Team.Name,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}

type WatchEvent struct {
@@ -689,7 +689,7 @@ type WatchEvent struct {
    Sender Sender `json:"sender"`
}

-func (s WatchEvent) NewPoint() *client.Point {
+func (s WatchEvent) NewMetric() telegraf.Metric {
    event := "delete"
    t := map[string]string{
        "event": event,
@@ -703,9 +703,9 @@ func (s WatchEvent) NewPoint() *client.Point {
        "forks": s.Repository.Forks,
        "issues": s.Repository.Issues,
    }
-    p, err := client.NewPoint(meas, t, f, time.Now())
+    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
-    return p
+    return m
}
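Every event type above gets the same treatment, but each method is split across two hunks. Assembled from the CommitCommentEvent hunks, one fully converted method reads roughly as follows; the tag and field maps are abbreviated to the entries actually visible in this diff, with the elided repository and sender entries marked:

func (s CommitCommentEvent) NewMetric() telegraf.Metric {
    event := "commit_comment"
    t := map[string]string{
        "event": event,
        // ... repository and sender tags elided in the hunks above ...
    }
    f := map[string]interface{}{
        // ... other fields elided in the hunks above ...
        "commit":  s.Comment.Commit,
        "comment": s.Comment.Body,
    }
    m, err := telegraf.NewMetric(meas, t, f, time.Now())
    if err != nil {
        log.Fatalf("Failed to create %v event", event)
    }
    return m
}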
@@ -136,7 +136,7 @@ func TestHttpJson200(t *testing.T) {
        require.NoError(t, err)
        assert.Equal(t, 12, acc.NFields())
        // Set responsetime
-        for _, p := range acc.Points {
+        for _, p := range acc.Metrics {
            p.Fields["response_time"] = 1.0
        }

@@ -203,7 +203,7 @@ func TestHttpJson200Tags(t *testing.T) {
    var acc testutil.Accumulator
    err := service.Gather(&acc)
    // Set responsetime
-    for _, p := range acc.Points {
+    for _, p := range acc.Metrics {
        p.Fields["response_time"] = 1.0
    }
    require.NoError(t, err)
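The test hunks in this diff rename the accumulator's exported slice from acc.Points to acc.Metrics. The testutil.Accumulator type itself is not part of the diff; judging purely from how these tests use it (point.Measurement, point.Tags, p.Fields, len(acc.Metrics)), the relevant shape is roughly the sketch below, with names inferred from usage; the real definition in telegraf/testutil may differ:

package sketch

// Metric mirrors what the tests read off each element of acc.Metrics.
type Metric struct {
    Measurement string
    Tags        map[string]string
    Fields      map[string]interface{}
}

// Accumulator mirrors the field the tests range over and reset to nil.
type Accumulator struct {
    Metrics []*Metric
}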
@@ -71,7 +71,7 @@ func TestBasic(t *testing.T) {
    var acc testutil.Accumulator
    require.NoError(t, plugin.Gather(&acc))

-    require.Len(t, acc.Points, 2)
+    require.Len(t, acc.Metrics, 2)
    fields := map[string]interface{}{
        // JSON will truncate floats to integer representations.
        // Since there's no distinction in JSON, we can't assume it's an int.
@@ -85,7 +85,7 @@ func TestHttpJsonMultiValue(t *testing.T) {
    err := jolokia.Gather(&acc)

    assert.Nil(t, err)
-    assert.Equal(t, 1, len(acc.Points))
+    assert.Equal(t, 1, len(acc.Metrics))

    fields := map[string]interface{}{
        "heap_memory_usage_init": 67108864.0,
@@ -112,5 +112,5 @@ func TestHttpJsonOn404(t *testing.T) {
    err := jolokia.Gather(&acc)

    assert.Nil(t, err)
-    assert.Equal(t, 0, len(acc.Points))
+    assert.Equal(t, 0, len(acc.Metrics))
}
@@ -5,7 +5,6 @@ import (
    "strings"
    "sync"

-    "github.com/influxdata/influxdb/models"
    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/plugins/inputs"

@@ -28,8 +27,8 @@ type Kafka struct {
    // channel for all kafka consumer errors
    errs <-chan *sarama.ConsumerError
    // channel for all incoming parsed kafka points
-    pointChan chan models.Point
-    done      chan struct{}
+    metricC chan telegraf.Metric
+    done    chan struct{}

    // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
    // this is mostly for test purposes, but there may be a use-case for it later.
@@ -94,7 +93,7 @@ func (k *Kafka) Start() error {
    if k.PointBuffer == 0 {
        k.PointBuffer = 100000
    }
-    k.pointChan = make(chan models.Point, k.PointBuffer)
+    k.metricC = make(chan telegraf.Metric, k.PointBuffer)

    // Start the kafka message reader
    go k.parser()
@@ -113,18 +112,18 @@ func (k *Kafka) parser() {
        case err := <-k.errs:
            log.Printf("Kafka Consumer Error: %s\n", err.Error())
        case msg := <-k.in:
-            points, err := models.ParsePoints(msg.Value)
+            metrics, err := telegraf.ParseMetrics(msg.Value)
            if err != nil {
                log.Printf("Could not parse kafka message: %s, error: %s",
                    string(msg.Value), err.Error())
            }

-            for _, point := range points {
+            for _, metric := range metrics {
                select {
-                case k.pointChan <- point:
+                case k.metricC <- metric:
                    continue
                default:
-                    log.Printf("Kafka Consumer buffer is full, dropping a point." +
+                    log.Printf("Kafka Consumer buffer is full, dropping a metric." +
                        " You may want to increase the point_buffer setting")
                }
            }
@@ -152,9 +151,9 @@ func (k *Kafka) Stop() {
func (k *Kafka) Gather(acc telegraf.Accumulator) error {
    k.Lock()
    defer k.Unlock()
-    npoints := len(k.pointChan)
+    npoints := len(k.metricC)
    for i := 0; i < npoints; i++ {
-        point := <-k.pointChan
+        point := <-k.metricC
        acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
    }
    return nil
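The kafka consumer keeps its buffering scheme and only retypes the channel from models.Point to telegraf.Metric: the parser does a non-blocking send into a bounded channel and drops on overflow, and Gather drains only what is currently buffered. A condensed, self-contained sketch of that pattern, with generic names rather than the plugin's actual fields:

package sketch

import "log"

// push mirrors the parser's select/default: if the buffer is full the
// item is dropped instead of blocking the consumer goroutine.
func push(buf chan string, item string) {
    select {
    case buf <- item:
    default:
        log.Printf("buffer is full, dropping an item")
    }
}

// drain mirrors Gather: it reads only the items already buffered, so it
// never blocks waiting for new input.
func drain(buf chan string, handle func(string)) {
    n := len(buf)
    for i := 0; i < n; i++ {
        handle(<-buf)
    }
}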
@@ -51,13 +51,13 @@ func TestReadsMetricsFromKafka(t *testing.T) {
    // Verify that we can now gather the sent message
    var acc testutil.Accumulator
    // Sanity check
-    assert.Equal(t, 0, len(acc.Points), "There should not be any points")
+    assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")

    // Gather points
    err = k.Gather(&acc)
    require.NoError(t, err)
-    if len(acc.Points) == 1 {
-        point := acc.Points[0]
+    if len(acc.Metrics) == 1 {
+        point := acc.Metrics[0]
        assert.Equal(t, "cpu_load_short", point.Measurement)
        assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
        assert.Equal(t, map[string]string{
@@ -83,7 +83,7 @@ func waitForPoint(k *Kafka, t *testing.T) {
        counter++
        if counter > 1000 {
            t.Fatal("Waited for 5s, point never arrived to consumer")
-        } else if len(k.pointChan) == 1 {
+        } else if len(k.metricC) == 1 {
            return
        }
    }
@@ -4,7 +4,7 @@ import (
    "testing"
    "time"

-    "github.com/influxdata/influxdb/models"
+    "github.com/influxdata/telegraf"
    "github.com/influxdata/telegraf/testutil"

    "github.com/Shopify/sarama"
@@ -29,7 +29,7 @@ func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
        doNotCommitMsgs: true,
        errs: make(chan *sarama.ConsumerError, pointBuffer),
        done: make(chan struct{}),
-        pointChan: make(chan models.Point, pointBuffer),
+        metricC: make(chan telegraf.Metric, pointBuffer),
    }
    return &k, in
}
@@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) {
    in <- saramaMsg(testMsg)
    time.Sleep(time.Millisecond)

-    assert.Equal(t, len(k.pointChan), 1)
+    assert.Equal(t, len(k.metricC), 1)
}

// Test that the parser ignores invalid messages
@@ -55,7 +55,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
    in <- saramaMsg(invalidMsg)
    time.Sleep(time.Millisecond)

-    assert.Equal(t, len(k.pointChan), 0)
+    assert.Equal(t, len(k.metricC), 0)
}

// Test that points are dropped when we hit the buffer limit
@@ -69,7 +69,7 @@ func TestRunParserRespectsBuffer(t *testing.T) {
    }
    time.Sleep(time.Millisecond)

-    assert.Equal(t, len(k.pointChan), 5)
+    assert.Equal(t, len(k.metricC), 5)
}

// Test that the parser parses kafka messages into points
@@ -84,7 +84,7 @@ func TestRunParserAndGather(t *testing.T) {
    acc := testutil.Accumulator{}
    k.Gather(&acc)

-    assert.Equal(t, len(acc.Points), 1)
+    assert.Equal(t, len(acc.Metrics), 1)
    acc.AssertContainsFields(t, "cpu_load_short",
        map[string]interface{}{"value": float64(23422)})
}
@@ -113,7 +113,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {

    var found bool

-    for _, pnt := range acc.Points {
+    for _, pnt := range acc.Metrics {
        if pnt.Measurement == "postgresql" {
            if pnt.Tags["db"] == "postgres" {
                found = true
@@ -123,7 +123,7 @@ func assertContainsTaggedFloat(
    tags map[string]string,
) {
    var actualValue float64
-    for _, pt := range acc.Points {
+    for _, pt := range acc.Metrics {
        if pt.Measurement == measurement {
            for fieldname, value := range pt.Fields {
                if fieldname == field {
@@ -57,9 +57,9 @@ func TestDiskStats(t *testing.T) {
    err = (&DiskStats{ps: &mps}).Gather(&acc)
    require.NoError(t, err)

-    numDiskPoints := acc.NFields()
-    expectedAllDiskPoints := 14
-    assert.Equal(t, expectedAllDiskPoints, numDiskPoints)
+    numDiskMetrics := acc.NFields()
+    expectedAllDiskMetrics := 14
+    assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics)

    tags1 := map[string]string{
        "path": "/",
@@ -91,15 +91,15 @@ func TestDiskStats(t *testing.T) {
    acc.AssertContainsTaggedFields(t, "disk", fields1, tags1)
    acc.AssertContainsTaggedFields(t, "disk", fields2, tags2)

-    // We expect 6 more DiskPoints to show up with an explicit match on "/"
+    // We expect 6 more DiskMetrics to show up with an explicit match on "/"
    // and /home not matching the /dev in MountPoints
    err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc)
-    assert.Equal(t, expectedAllDiskPoints+7, acc.NFields())
+    assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields())

    // We should see all the diskpoints as MountPoints includes both
    // / and /home
    err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc)
-    assert.Equal(t, 2*expectedAllDiskPoints+7, acc.NFields())
+    assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields())
}

// func TestDiskIOStats(t *testing.T) {
@@ -138,9 +138,9 @@ func TestDiskStats(t *testing.T) {
// 	err = (&DiskIOStats{ps: &mps}).Gather(&acc)
// 	require.NoError(t, err)

-// 	numDiskIOPoints := acc.NFields()
-// 	expectedAllDiskIOPoints := 14
-// 	assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints)
+// 	numDiskIOMetrics := acc.NFields()
+// 	expectedAllDiskIOMetrics := 14
+// 	assert.Equal(t, expectedAllDiskIOMetrics, numDiskIOMetrics)

// 	dtags1 := map[string]string{
// 		"name": "sda1",
@@ -166,10 +166,10 @@ func TestDiskStats(t *testing.T) {
// 	assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2))
// 	assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2))

-// 	// We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1"
+// 	// We expect 7 more DiskIOMetrics to show up with an explicit match on "sdb1"
// 	// and serial should be missing from the tags with SkipSerialNumber set
// 	err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc)
-// 	assert.Equal(t, expectedAllDiskIOPoints+7, acc.NFields())
+// 	assert.Equal(t, expectedAllDiskIOMetrics+7, acc.NFields())

// 	dtags3 := map[string]string{
// 		"name": "sdb1",
@@ -55,7 +55,7 @@ func TestMemStats(t *testing.T) {
    }
    acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))

-    acc.Points = nil
+    acc.Metrics = nil

    err = (&SwapStats{&mps}).Gather(&acc)
    require.NoError(t, err)
@@ -85,7 +85,7 @@ func TestNetStats(t *testing.T) {
    }
    acc.AssertContainsTaggedFields(t, "net", fields2, ntags)

-    acc.Points = nil
+    acc.Metrics = nil

    err = (&NetStats{&mps}).Gather(&acc)
    require.NoError(t, err)
@@ -148,7 +148,7 @@ func TestZfsPoolMetrics(t *testing.T) {
    require.NoError(t, err)

    require.False(t, acc.HasMeasurement("zfs_pool"))
-    acc.Points = nil
+    acc.Metrics = nil

    z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true}
    err = z.Gather(&acc)
@@ -198,7 +198,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
    require.NoError(t, err)

    acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
-    acc.Points = nil
+    acc.Metrics = nil

    //two pools, all metrics
    err = os.MkdirAll(testKstatPath+"/STORAGE", 0755)
@@ -217,7 +217,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
    require.NoError(t, err)

    acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
-    acc.Points = nil
+    acc.Metrics = nil

    intMetrics = getKstatMetricsArcOnly()