Throughout telegraf, use telegraf.Metric rather than client.Point

closes #599
Cameron Sparr 2016-01-27 16:15:14 -07:00 committed by Ryan Merrick
parent 5364a20825
commit 1edfa9bbd0
52 changed files with 391 additions and 437 deletions
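In short, everything that previously constructed and passed around `client.Point` from `github.com/influxdata/influxdb/client/v2` now uses the internal `telegraf.Metric` type. A minimal sketch of the before/after pattern, assuming the `telegraf.NewMetric` constructor and `Metric` accessors exactly as they appear in the diffs below (the API as of this commit, not current telegraf):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdata/telegraf"
)

func main() {
	tags := map[string]string{"host": "server01"}
	fields := map[string]interface{}{"value": 23422.0}

	// Before: pt, err := client.NewPoint("cpu_load_short", tags, fields, time.Now())
	// After:  the internal metric type replaces the influxdb client type.
	m, err := telegraf.NewMetric("cpu_load_short", tags, fields, time.Now())
	if err != nil {
		log.Fatal(err)
	}

	// telegraf.Metric exposes the same accessors the plugins below rely on.
	fmt.Println(m.Name(), m.Tags(), m.Fields(), m.Time())
	fmt.Println(m.String()) // line-protocol representation, used for debug output
}
```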

View File

@ -3,8 +3,10 @@
### Release Notes ### Release Notes
### Features ### Features
- [#564](https://github.com/influxdata/telegraf/issues/564): features for plugin writing simplification. Internal metric data type.
### Bugfixes ### Bugfixes
- [#599](https://github.com/influxdata/telegraf/issues/599): datadog plugin tags not working.
## v0.10.1 [2016-01-27] ## v0.10.1 [2016-01-27]

View File

@ -37,7 +37,7 @@ and submit new inputs.
### Input Plugin Guidelines ### Input Plugin Guidelines
* A plugin must conform to the `inputs.Input` interface. * A plugin must conform to the `telegraf.Input` interface.
* Input Plugins should call `inputs.Add` in their `init` function to register themselves. * Input Plugins should call `inputs.Add` in their `init` function to register themselves.
See below for a quick example. See below for a quick example.
* Input Plugins must be added to the * Input Plugins must be added to the
@ -97,7 +97,10 @@ package simple
// simple.go // simple.go
import "github.com/influxdata/telegraf/plugins/inputs" import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
type Simple struct { type Simple struct {
Ok bool Ok bool
@ -122,7 +125,7 @@ func (s *Simple) Gather(acc inputs.Accumulator) error {
} }
func init() { func init() {
inputs.Add("simple", func() inputs.Input { return &Simple{} }) inputs.Add("simple", func() telegraf.Input { return &Simple{} })
} }
``` ```
@ -182,7 +185,7 @@ type Output interface {
Close() error Close() error
Description() string Description() string
SampleConfig() string SampleConfig() string
Write(points []*client.Point) error Write(metrics []telegraf.Metric) error
} }
``` ```
@ -193,7 +196,10 @@ package simpleoutput
// simpleoutput.go // simpleoutput.go
import "github.com/influxdata/telegraf/plugins/outputs" import (
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
)
type Simple struct { type Simple struct {
Ok bool Ok bool
@ -217,7 +223,7 @@ func (s *Simple) Close() error {
return nil return nil
} }
func (s *Simple) Write(points []*client.Point) error { func (s *Simple) Write(metrics []telegraf.Metric) error {
for _, pt := range points { for _, pt := range metrics {
// write `pt` to the output sink here // write `pt` to the output sink here
} }
@ -225,7 +231,7 @@ func (s *Simple) Write(points []*client.Point) error {
} }
func init() { func init() {
outputs.Add("simpleoutput", func() outputs.Output { return &Simple{} }) outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} })
} }
``` ```
@ -253,7 +259,7 @@ type ServiceOutput interface {
Close() error Close() error
Description() string Description() string
SampleConfig() string SampleConfig() string
Write(points []*client.Point) error Write(metrics []telegraf.Metric) error
Start() error Start() error
Stop() Stop()
} }
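The diff does not show a service output example, but one follows the same shape as `Simple` above with a `Start`/`Stop` lifecycle added. A hedged sketch (the type and package name are illustrative, not from the repository; registration is assumed to work as in the simpleoutput example):

```go
// simpleserviceoutput.go -- illustrative only
package simpleserviceoutput

import "github.com/influxdata/telegraf"

type SimpleService struct {
	done chan struct{}
}

func (s *SimpleService) Description() string  { return "a simple service output" }
func (s *SimpleService) SampleConfig() string { return "" }
func (s *SimpleService) Connect() error       { return nil }
func (s *SimpleService) Close() error         { return nil }

// Start launches whatever background work the "service" needs.
func (s *SimpleService) Start() error {
	s.done = make(chan struct{})
	return nil
}

// Stop tells that background work to shut down.
func (s *SimpleService) Stop() {
	close(s.done)
}

// Write satisfies the same metric-based signature as a regular output.
func (s *SimpleService) Write(metrics []telegraf.Metric) error {
	for _, m := range metrics {
		_ = m // write `m` to the output sink here
	}
	return nil
}
```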

View File

@ -7,17 +7,16 @@ import (
"sync" "sync"
"time" "time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/models" "github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/influxdb/client/v2"
) )
func NewAccumulator( func NewAccumulator(
inputConfig *internal_models.InputConfig, inputConfig *internal_models.InputConfig,
points chan *client.Point, metrics chan telegraf.Metric,
) *accumulator { ) *accumulator {
acc := accumulator{} acc := accumulator{}
acc.points = points acc.metrics = metrics
acc.inputConfig = inputConfig acc.inputConfig = inputConfig
return &acc return &acc
} }
@ -25,7 +24,7 @@ func NewAccumulator(
type accumulator struct { type accumulator struct {
sync.Mutex sync.Mutex
points chan *client.Point metrics chan telegraf.Metric
defaultTags map[string]string defaultTags map[string]string
@ -136,15 +135,15 @@ func (ac *accumulator) AddFields(
measurement = ac.prefix + measurement measurement = ac.prefix + measurement
} }
pt, err := client.NewPoint(measurement, tags, result, timestamp) m, err := telegraf.NewMetric(measurement, tags, result, timestamp)
if err != nil { if err != nil {
log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) log.Printf("Error adding point [%s]: %s\n", measurement, err.Error())
return return
} }
if ac.debug { if ac.debug {
fmt.Println("> " + pt.String()) fmt.Println("> " + m.String())
} }
ac.points <- pt ac.metrics <- m
} }
func (ac *accumulator) Debug() bool { func (ac *accumulator) Debug() bool {

View File

@ -14,8 +14,6 @@ import (
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal/config" "github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/internal/models" "github.com/influxdata/telegraf/internal/models"
"github.com/influxdata/influxdb/client/v2"
) )
// Agent runs telegraf and collects data based on the given config // Agent runs telegraf and collects data based on the given config
@ -101,7 +99,7 @@ func panicRecover(input *internal_models.RunningInput) {
// gatherParallel runs the inputs that are using the same reporting interval // gatherParallel runs the inputs that are using the same reporting interval
// as the telegraf agent. // as the telegraf agent.
func (a *Agent) gatherParallel(pointChan chan *client.Point) error { func (a *Agent) gatherParallel(metricC chan telegraf.Metric) error {
var wg sync.WaitGroup var wg sync.WaitGroup
start := time.Now() start := time.Now()
@ -118,7 +116,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
defer panicRecover(input) defer panicRecover(input)
defer wg.Done() defer wg.Done()
acc := NewAccumulator(input.Config, pointChan) acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug) acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags) acc.setDefaultTags(a.Config.Tags)
@ -159,7 +157,7 @@ func (a *Agent) gatherParallel(pointChan chan *client.Point) error {
func (a *Agent) gatherSeparate( func (a *Agent) gatherSeparate(
shutdown chan struct{}, shutdown chan struct{},
input *internal_models.RunningInput, input *internal_models.RunningInput,
pointChan chan *client.Point, metricC chan telegraf.Metric,
) error { ) error {
defer panicRecover(input) defer panicRecover(input)
@ -169,7 +167,7 @@ func (a *Agent) gatherSeparate(
var outerr error var outerr error
start := time.Now() start := time.Now()
acc := NewAccumulator(input.Config, pointChan) acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(a.Config.Agent.Debug) acc.SetDebug(a.Config.Agent.Debug)
acc.setDefaultTags(a.Config.Tags) acc.setDefaultTags(a.Config.Tags)
@ -201,13 +199,13 @@ func (a *Agent) gatherSeparate(
func (a *Agent) Test() error { func (a *Agent) Test() error {
shutdown := make(chan struct{}) shutdown := make(chan struct{})
defer close(shutdown) defer close(shutdown)
pointChan := make(chan *client.Point) metricC := make(chan telegraf.Metric)
// dummy receiver for the point channel // dummy receiver for the point channel
go func() { go func() {
for { for {
select { select {
case <-pointChan: case <-metricC:
// do nothing // do nothing
case <-shutdown: case <-shutdown:
return return
@ -216,7 +214,7 @@ func (a *Agent) Test() error {
}() }()
for _, input := range a.Config.Inputs { for _, input := range a.Config.Inputs {
acc := NewAccumulator(input.Config, pointChan) acc := NewAccumulator(input.Config, metricC)
acc.SetDebug(true) acc.SetDebug(true)
fmt.Printf("* Plugin: %s, Collection 1\n", input.Name) fmt.Printf("* Plugin: %s, Collection 1\n", input.Name)
@ -263,7 +261,7 @@ func (a *Agent) flush() {
} }
// flusher monitors the points input channel and flushes on the minimum interval // flusher monitors the points input channel and flushes on the minimum interval
func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) error { func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric) error {
// Inelegant, but this sleep is to allow the Gather threads to run, so that // Inelegant, but this sleep is to allow the Gather threads to run, so that
// the flusher will flush after metrics are collected. // the flusher will flush after metrics are collected.
time.Sleep(time.Millisecond * 200) time.Sleep(time.Millisecond * 200)
@ -278,9 +276,9 @@ func (a *Agent) flusher(shutdown chan struct{}, pointChan chan *client.Point) er
return nil return nil
case <-ticker.C: case <-ticker.C:
a.flush() a.flush()
case pt := <-pointChan: case m := <-metricC:
for _, o := range a.Config.Outputs { for _, o := range a.Config.Outputs {
o.AddPoint(pt) o.AddPoint(m)
} }
} }
} }
@ -321,7 +319,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration)
// channel shared between all input threads for accumulating points // channel shared between all input threads for accumulating points
pointChan := make(chan *client.Point, 1000) metricC := make(chan telegraf.Metric, 1000)
// Round collection to nearest interval by sleeping // Round collection to nearest interval by sleeping
if a.Config.Agent.RoundInterval { if a.Config.Agent.RoundInterval {
@ -333,7 +331,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
wg.Add(1) wg.Add(1)
go func() { go func() {
defer wg.Done() defer wg.Done()
if err := a.flusher(shutdown, pointChan); err != nil { if err := a.flusher(shutdown, metricC); err != nil {
log.Printf("Flusher routine failed, exiting: %s\n", err.Error()) log.Printf("Flusher routine failed, exiting: %s\n", err.Error())
close(shutdown) close(shutdown)
} }
@ -358,7 +356,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
wg.Add(1) wg.Add(1)
go func(input *internal_models.RunningInput) { go func(input *internal_models.RunningInput) {
defer wg.Done() defer wg.Done()
if err := a.gatherSeparate(shutdown, input, pointChan); err != nil { if err := a.gatherSeparate(shutdown, input, metricC); err != nil {
log.Printf(err.Error()) log.Printf(err.Error())
} }
}(input) }(input)
@ -368,7 +366,7 @@ func (a *Agent) Run(shutdown chan struct{}) error {
defer wg.Wait() defer wg.Wait()
for { for {
if err := a.gatherParallel(pointChan); err != nil { if err := a.gatherParallel(metricC); err != nil {
log.Printf(err.Error()) log.Printf(err.Error())
} }
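The agent now has a single data path: every input goroutine pushes `telegraf.Metric` values onto one shared buffered channel, and a lone flusher goroutine drains that channel into all configured outputs, flushing on a ticker. A simplified standalone sketch of that channel pattern (the types and names here are illustrative, not the agent's real ones):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// metric stands in for telegraf.Metric in this sketch.
type metric struct{ name string }

func main() {
	metricC := make(chan metric, 1000) // shared and buffered, like the agent's metricC
	shutdown := make(chan struct{})

	// Inputs: each gather goroutine pushes metrics onto the shared channel.
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			metricC <- metric{name: fmt.Sprintf("input%d_cpu", id)}
		}(i)
	}

	// Flusher: selects between the shutdown signal, the flush ticker, and
	// incoming metrics, fanning each metric out to every output (here it
	// just prints).
	go func() {
		ticker := time.NewTicker(50 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-shutdown:
				return
			case <-ticker.C:
				// the agent would call each output's Write() here
			case m := <-metricC:
				fmt.Println("buffering for outputs:", m.name)
			}
		}
	}()

	wg.Wait()
	time.Sleep(100 * time.Millisecond) // give the flusher time to drain
	close(shutdown)
}
```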

View File

@ -3,7 +3,7 @@ package internal_models
import ( import (
"strings" "strings"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
) )
@ -24,8 +24,8 @@ type Filter struct {
IsActive bool IsActive bool
} }
func (f Filter) ShouldPointPass(point *client.Point) bool { func (f Filter) ShouldMetricPass(metric telegraf.Metric) bool {
if f.ShouldPass(point.Name()) && f.ShouldTagsPass(point.Tags()) { if f.ShouldPass(metric.Name()) && f.ShouldTagsPass(metric.Tags()) {
return true return true
} }
return false return false

View File

@ -5,8 +5,6 @@ import (
"time" "time"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/influxdb/client/v2"
) )
const DEFAULT_POINT_BUFFER_LIMIT = 10000 const DEFAULT_POINT_BUFFER_LIMIT = 10000
@ -18,7 +16,7 @@ type RunningOutput struct {
Quiet bool Quiet bool
PointBufferLimit int PointBufferLimit int
points []*client.Point metrics []telegraf.Metric
overwriteCounter int overwriteCounter int
} }
@ -29,7 +27,7 @@ func NewRunningOutput(
) *RunningOutput { ) *RunningOutput {
ro := &RunningOutput{ ro := &RunningOutput{
Name: name, Name: name,
points: make([]*client.Point, 0), metrics: make([]telegraf.Metric, 0),
Output: output, Output: output,
Config: conf, Config: conf,
PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT, PointBufferLimit: DEFAULT_POINT_BUFFER_LIMIT,
@ -37,34 +35,34 @@ func NewRunningOutput(
return ro return ro
} }
func (ro *RunningOutput) AddPoint(point *client.Point) { func (ro *RunningOutput) AddPoint(point telegraf.Metric) {
if ro.Config.Filter.IsActive { if ro.Config.Filter.IsActive {
if !ro.Config.Filter.ShouldPointPass(point) { if !ro.Config.Filter.ShouldMetricPass(point) {
return return
} }
} }
if len(ro.points) < ro.PointBufferLimit { if len(ro.metrics) < ro.PointBufferLimit {
ro.points = append(ro.points, point) ro.metrics = append(ro.metrics, point)
} else { } else {
if ro.overwriteCounter == len(ro.points) { if ro.overwriteCounter == len(ro.metrics) {
ro.overwriteCounter = 0 ro.overwriteCounter = 0
} }
ro.points[ro.overwriteCounter] = point ro.metrics[ro.overwriteCounter] = point
ro.overwriteCounter++ ro.overwriteCounter++
} }
} }
func (ro *RunningOutput) Write() error { func (ro *RunningOutput) Write() error {
start := time.Now() start := time.Now()
err := ro.Output.Write(ro.points) err := ro.Output.Write(ro.metrics)
elapsed := time.Since(start) elapsed := time.Since(start)
if err == nil { if err == nil {
if !ro.Quiet { if !ro.Quiet {
log.Printf("Wrote %d metrics to output %s in %s\n", log.Printf("Wrote %d metrics to output %s in %s\n",
len(ro.points), ro.Name, elapsed) len(ro.metrics), ro.Name, elapsed)
} }
ro.points = make([]*client.Point, 0) ro.metrics = make([]telegraf.Metric, 0)
ro.overwriteCounter = 0 ro.overwriteCounter = 0
} }
return err return err
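`RunningOutput` buffers at most `PointBufferLimit` metrics; once the buffer is full, new metrics overwrite previously buffered ones in order, tracked by `overwriteCounter`, until a successful `Write` clears the buffer. A standalone sketch of that buffering rule (simplified types, illustrative only):

```go
package main

import "fmt"

type buffer struct {
	limit            int
	metrics          []string
	overwriteCounter int
}

// add mirrors RunningOutput.AddPoint's buffering: append until the limit,
// then overwrite existing slots in a wrapping fashion.
func (b *buffer) add(m string) {
	if len(b.metrics) < b.limit {
		b.metrics = append(b.metrics, m)
		return
	}
	if b.overwriteCounter == len(b.metrics) {
		b.overwriteCounter = 0
	}
	b.metrics[b.overwriteCounter] = m
	b.overwriteCounter++
}

func main() {
	b := &buffer{limit: 3}
	for i := 1; i <= 5; i++ {
		b.add(fmt.Sprintf("m%d", i))
	}
	fmt.Println(b.metrics) // [m4 m5 m3]: m4 and m5 overwrote the two oldest slots
}
```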

View File

@ -1,37 +1,5 @@
package telegraf package telegraf
import "github.com/influxdata/influxdb/client/v2"
// type Output interface {
// // Connect to the Output
// Connect() error
// // Close any connections to the Output
// Close() error
// // Description returns a one-sentence description on the Output
// Description() string
// // SampleConfig returns the default configuration of the Output
// SampleConfig() string
// // Write takes in group of points to be written to the Output
// Write(metrics []Metric) error
// }
// type ServiceOutput interface {
// // Connect to the Output
// Connect() error
// // Close any connections to the Output
// Close() error
// // Description returns a one-sentence description on the Output
// Description() string
// // SampleConfig returns the default configuration of the Output
// SampleConfig() string
// // Write takes in group of points to be written to the Output
// Write(metrics []Metric) error
// // Start the "service" that will provide an Output
// Start() error
// // Stop the "service" that will provide an Output
// Stop()
// }
type Output interface { type Output interface {
// Connect to the Output // Connect to the Output
Connect() error Connect() error
@ -42,7 +10,7 @@ type Output interface {
// SampleConfig returns the default configuration of the Output // SampleConfig returns the default configuration of the Output
SampleConfig() string SampleConfig() string
// Write takes in group of points to be written to the Output // Write takes in group of points to be written to the Output
Write(points []*client.Point) error Write(metrics []Metric) error
} }
type ServiceOutput interface { type ServiceOutput interface {
@ -55,7 +23,7 @@ type ServiceOutput interface {
// SampleConfig returns the default configuration of the Output // SampleConfig returns the default configuration of the Output
SampleConfig() string SampleConfig() string
// Write takes in group of points to be written to the Output // Write takes in group of points to be written to the Output
Write(points []*client.Point) error Write(metrics []Metric) error
// Start the "service" that will provide an Output // Start the "service" that will provide an Output
Start() error Start() error
// Stop the "service" that will provide an Output // Stop the "service" that will provide an Output

View File

@ -45,7 +45,7 @@ func (gh *GithubWebhooks) Gather(acc telegraf.Accumulator) error {
gh.Lock() gh.Lock()
defer gh.Unlock() defer gh.Unlock()
for _, event := range gh.events { for _, event := range gh.events {
p := event.NewPoint() p := event.NewMetric()
acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time()) acc.AddFields("github_webhooks", p.Fields(), p.Tags(), p.Time())
} }
gh.events = make([]Event, 0) gh.events = make([]Event, 0)

View File

@ -5,13 +5,13 @@ import (
"log" "log"
"time" "time"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
) )
const meas = "github_webhooks" const meas = "github_webhooks"
type Event interface { type Event interface {
NewPoint() *client.Point NewMetric() telegraf.Metric
} }
type Repository struct { type Repository struct {
@ -90,7 +90,7 @@ type CommitCommentEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s CommitCommentEvent) NewPoint() *client.Point { func (s CommitCommentEvent) NewMetric() telegraf.Metric {
event := "commit_comment" event := "commit_comment"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -106,11 +106,11 @@ func (s CommitCommentEvent) NewPoint() *client.Point {
"commit": s.Comment.Commit, "commit": s.Comment.Commit,
"comment": s.Comment.Body, "comment": s.Comment.Body,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type CreateEvent struct { type CreateEvent struct {
@ -120,7 +120,7 @@ type CreateEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s CreateEvent) NewPoint() *client.Point { func (s CreateEvent) NewMetric() telegraf.Metric {
event := "create" event := "create"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -136,11 +136,11 @@ func (s CreateEvent) NewPoint() *client.Point {
"ref": s.Ref, "ref": s.Ref,
"refType": s.RefType, "refType": s.RefType,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type DeleteEvent struct { type DeleteEvent struct {
@ -150,7 +150,7 @@ type DeleteEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s DeleteEvent) NewPoint() *client.Point { func (s DeleteEvent) NewMetric() telegraf.Metric {
event := "delete" event := "delete"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -166,11 +166,11 @@ func (s DeleteEvent) NewPoint() *client.Point {
"ref": s.Ref, "ref": s.Ref,
"refType": s.RefType, "refType": s.RefType,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type DeploymentEvent struct { type DeploymentEvent struct {
@ -179,7 +179,7 @@ type DeploymentEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s DeploymentEvent) NewPoint() *client.Point { func (s DeploymentEvent) NewMetric() telegraf.Metric {
event := "deployment" event := "deployment"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -197,11 +197,11 @@ func (s DeploymentEvent) NewPoint() *client.Point {
"environment": s.Deployment.Environment, "environment": s.Deployment.Environment,
"description": s.Deployment.Description, "description": s.Deployment.Description,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type DeploymentStatusEvent struct { type DeploymentStatusEvent struct {
@ -211,7 +211,7 @@ type DeploymentStatusEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s DeploymentStatusEvent) NewPoint() *client.Point { func (s DeploymentStatusEvent) NewMetric() telegraf.Metric {
event := "delete" event := "delete"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -231,11 +231,11 @@ func (s DeploymentStatusEvent) NewPoint() *client.Point {
"depState": s.DeploymentStatus.State, "depState": s.DeploymentStatus.State,
"depDescription": s.DeploymentStatus.Description, "depDescription": s.DeploymentStatus.Description,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type ForkEvent struct { type ForkEvent struct {
@ -244,7 +244,7 @@ type ForkEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s ForkEvent) NewPoint() *client.Point { func (s ForkEvent) NewMetric() telegraf.Metric {
event := "fork" event := "fork"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -259,11 +259,11 @@ func (s ForkEvent) NewPoint() *client.Point {
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
"fork": s.Forkee.Repository, "fork": s.Forkee.Repository,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type GollumEvent struct { type GollumEvent struct {
@ -273,7 +273,7 @@ type GollumEvent struct {
} }
// REVIEW: Going to be lazy and not deal with the pages. // REVIEW: Going to be lazy and not deal with the pages.
func (s GollumEvent) NewPoint() *client.Point { func (s GollumEvent) NewMetric() telegraf.Metric {
event := "gollum" event := "gollum"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -287,11 +287,11 @@ func (s GollumEvent) NewPoint() *client.Point {
"forks": s.Repository.Forks, "forks": s.Repository.Forks,
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type IssueCommentEvent struct { type IssueCommentEvent struct {
@ -301,7 +301,7 @@ type IssueCommentEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s IssueCommentEvent) NewPoint() *client.Point { func (s IssueCommentEvent) NewMetric() telegraf.Metric {
event := "issue_comment" event := "issue_comment"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -319,11 +319,11 @@ func (s IssueCommentEvent) NewPoint() *client.Point {
"comments": s.Issue.Comments, "comments": s.Issue.Comments,
"body": s.Comment.Body, "body": s.Comment.Body,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type IssuesEvent struct { type IssuesEvent struct {
@ -333,7 +333,7 @@ type IssuesEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s IssuesEvent) NewPoint() *client.Point { func (s IssuesEvent) NewMetric() telegraf.Metric {
event := "issue" event := "issue"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -351,11 +351,11 @@ func (s IssuesEvent) NewPoint() *client.Point {
"title": s.Issue.Title, "title": s.Issue.Title,
"comments": s.Issue.Comments, "comments": s.Issue.Comments,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type MemberEvent struct { type MemberEvent struct {
@ -364,7 +364,7 @@ type MemberEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s MemberEvent) NewPoint() *client.Point { func (s MemberEvent) NewMetric() telegraf.Metric {
event := "member" event := "member"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -380,11 +380,11 @@ func (s MemberEvent) NewPoint() *client.Point {
"newMember": s.Member.User, "newMember": s.Member.User,
"newMemberStatus": s.Member.Admin, "newMemberStatus": s.Member.Admin,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type MembershipEvent struct { type MembershipEvent struct {
@ -394,7 +394,7 @@ type MembershipEvent struct {
Team Team `json:"team"` Team Team `json:"team"`
} }
func (s MembershipEvent) NewPoint() *client.Point { func (s MembershipEvent) NewMetric() telegraf.Metric {
event := "membership" event := "membership"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -406,11 +406,11 @@ func (s MembershipEvent) NewPoint() *client.Point {
"newMember": s.Member.User, "newMember": s.Member.User,
"newMemberStatus": s.Member.Admin, "newMemberStatus": s.Member.Admin,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type PageBuildEvent struct { type PageBuildEvent struct {
@ -418,7 +418,7 @@ type PageBuildEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s PageBuildEvent) NewPoint() *client.Point { func (s PageBuildEvent) NewMetric() telegraf.Metric {
event := "page_build" event := "page_build"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -432,11 +432,11 @@ func (s PageBuildEvent) NewPoint() *client.Point {
"forks": s.Repository.Forks, "forks": s.Repository.Forks,
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type PublicEvent struct { type PublicEvent struct {
@ -444,7 +444,7 @@ type PublicEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s PublicEvent) NewPoint() *client.Point { func (s PublicEvent) NewMetric() telegraf.Metric {
event := "public" event := "public"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -458,11 +458,11 @@ func (s PublicEvent) NewPoint() *client.Point {
"forks": s.Repository.Forks, "forks": s.Repository.Forks,
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type PullRequestEvent struct { type PullRequestEvent struct {
@ -472,7 +472,7 @@ type PullRequestEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s PullRequestEvent) NewPoint() *client.Point { func (s PullRequestEvent) NewMetric() telegraf.Metric {
event := "pull_request" event := "pull_request"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -495,11 +495,11 @@ func (s PullRequestEvent) NewPoint() *client.Point {
"deletions": s.PullRequest.Deletions, "deletions": s.PullRequest.Deletions,
"changedFiles": s.PullRequest.ChangedFiles, "changedFiles": s.PullRequest.ChangedFiles,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type PullRequestReviewCommentEvent struct { type PullRequestReviewCommentEvent struct {
@ -509,7 +509,7 @@ type PullRequestReviewCommentEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s PullRequestReviewCommentEvent) NewPoint() *client.Point { func (s PullRequestReviewCommentEvent) NewMetric() telegraf.Metric {
event := "pull_request_review_comment" event := "pull_request_review_comment"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -533,11 +533,11 @@ func (s PullRequestReviewCommentEvent) NewPoint() *client.Point {
"commentFile": s.Comment.File, "commentFile": s.Comment.File,
"comment": s.Comment.Comment, "comment": s.Comment.Comment,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type PushEvent struct { type PushEvent struct {
@ -548,7 +548,7 @@ type PushEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s PushEvent) NewPoint() *client.Point { func (s PushEvent) NewMetric() telegraf.Metric {
event := "push" event := "push"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -565,11 +565,11 @@ func (s PushEvent) NewPoint() *client.Point {
"before": s.Before, "before": s.Before,
"after": s.After, "after": s.After,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type ReleaseEvent struct { type ReleaseEvent struct {
@ -578,7 +578,7 @@ type ReleaseEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s ReleaseEvent) NewPoint() *client.Point { func (s ReleaseEvent) NewMetric() telegraf.Metric {
event := "release" event := "release"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -593,11 +593,11 @@ func (s ReleaseEvent) NewPoint() *client.Point {
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
"tagName": s.Release.TagName, "tagName": s.Release.TagName,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type RepositoryEvent struct { type RepositoryEvent struct {
@ -605,7 +605,7 @@ type RepositoryEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s RepositoryEvent) NewPoint() *client.Point { func (s RepositoryEvent) NewMetric() telegraf.Metric {
event := "repository" event := "repository"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -619,11 +619,11 @@ func (s RepositoryEvent) NewPoint() *client.Point {
"forks": s.Repository.Forks, "forks": s.Repository.Forks,
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type StatusEvent struct { type StatusEvent struct {
@ -633,7 +633,7 @@ type StatusEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s StatusEvent) NewPoint() *client.Point { func (s StatusEvent) NewMetric() telegraf.Metric {
event := "status" event := "status"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -649,11 +649,11 @@ func (s StatusEvent) NewPoint() *client.Point {
"commit": s.Commit, "commit": s.Commit,
"state": s.State, "state": s.State,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type TeamAddEvent struct { type TeamAddEvent struct {
@ -662,7 +662,7 @@ type TeamAddEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s TeamAddEvent) NewPoint() *client.Point { func (s TeamAddEvent) NewMetric() telegraf.Metric {
event := "team_add" event := "team_add"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -677,11 +677,11 @@ func (s TeamAddEvent) NewPoint() *client.Point {
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
"teamName": s.Team.Name, "teamName": s.Team.Name,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }
type WatchEvent struct { type WatchEvent struct {
@ -689,7 +689,7 @@ type WatchEvent struct {
Sender Sender `json:"sender"` Sender Sender `json:"sender"`
} }
func (s WatchEvent) NewPoint() *client.Point { func (s WatchEvent) NewMetric() telegraf.Metric {
event := "delete" event := "delete"
t := map[string]string{ t := map[string]string{
"event": event, "event": event,
@ -703,9 +703,9 @@ func (s WatchEvent) NewPoint() *client.Point {
"forks": s.Repository.Forks, "forks": s.Repository.Forks,
"issues": s.Repository.Issues, "issues": s.Repository.Issues,
} }
p, err := client.NewPoint(meas, t, f, time.Now()) m, err := telegraf.NewMetric(meas, t, f, time.Now())
if err != nil { if err != nil {
log.Fatalf("Failed to create %v event", event) log.Fatalf("Failed to create %v event", event)
} }
return p return m
} }

View File

@ -136,7 +136,7 @@ func TestHttpJson200(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, 12, acc.NFields()) assert.Equal(t, 12, acc.NFields())
// Set responsetime // Set responsetime
for _, p := range acc.Points { for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0 p.Fields["response_time"] = 1.0
} }
@ -203,7 +203,7 @@ func TestHttpJson200Tags(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
err := service.Gather(&acc) err := service.Gather(&acc)
// Set responsetime // Set responsetime
for _, p := range acc.Points { for _, p := range acc.Metrics {
p.Fields["response_time"] = 1.0 p.Fields["response_time"] = 1.0
} }
require.NoError(t, err) require.NoError(t, err)

View File

@ -71,7 +71,7 @@ func TestBasic(t *testing.T) {
var acc testutil.Accumulator var acc testutil.Accumulator
require.NoError(t, plugin.Gather(&acc)) require.NoError(t, plugin.Gather(&acc))
require.Len(t, acc.Points, 2) require.Len(t, acc.Metrics, 2)
fields := map[string]interface{}{ fields := map[string]interface{}{
// JSON will truncate floats to integer representations. // JSON will truncate floats to integer representations.
// Since there's no distinction in JSON, we can't assume it's an int. // Since there's no distinction in JSON, we can't assume it's an int.

View File

@ -85,7 +85,7 @@ func TestHttpJsonMultiValue(t *testing.T) {
err := jolokia.Gather(&acc) err := jolokia.Gather(&acc)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 1, len(acc.Points)) assert.Equal(t, 1, len(acc.Metrics))
fields := map[string]interface{}{ fields := map[string]interface{}{
"heap_memory_usage_init": 67108864.0, "heap_memory_usage_init": 67108864.0,
@ -112,5 +112,5 @@ func TestHttpJsonOn404(t *testing.T) {
err := jolokia.Gather(&acc) err := jolokia.Gather(&acc)
assert.Nil(t, err) assert.Nil(t, err)
assert.Equal(t, 0, len(acc.Points)) assert.Equal(t, 0, len(acc.Metrics))
} }

View File

@ -5,7 +5,6 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/influxdata/influxdb/models"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs"
@ -28,7 +27,7 @@ type Kafka struct {
// channel for all kafka consumer errors // channel for all kafka consumer errors
errs <-chan *sarama.ConsumerError errs <-chan *sarama.ConsumerError
// channel for all incoming parsed kafka points // channel for all incoming parsed kafka points
pointChan chan models.Point metricC chan telegraf.Metric
done chan struct{} done chan struct{}
// doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer
@ -94,7 +93,7 @@ func (k *Kafka) Start() error {
if k.PointBuffer == 0 { if k.PointBuffer == 0 {
k.PointBuffer = 100000 k.PointBuffer = 100000
} }
k.pointChan = make(chan models.Point, k.PointBuffer) k.metricC = make(chan telegraf.Metric, k.PointBuffer)
// Start the kafka message reader // Start the kafka message reader
go k.parser() go k.parser()
@ -113,18 +112,18 @@ func (k *Kafka) parser() {
case err := <-k.errs: case err := <-k.errs:
log.Printf("Kafka Consumer Error: %s\n", err.Error()) log.Printf("Kafka Consumer Error: %s\n", err.Error())
case msg := <-k.in: case msg := <-k.in:
points, err := models.ParsePoints(msg.Value) metrics, err := telegraf.ParseMetrics(msg.Value)
if err != nil { if err != nil {
log.Printf("Could not parse kafka message: %s, error: %s", log.Printf("Could not parse kafka message: %s, error: %s",
string(msg.Value), err.Error()) string(msg.Value), err.Error())
} }
for _, point := range points { for _, metric := range metrics {
select { select {
case k.pointChan <- point: case k.metricC <- metric:
continue continue
default: default:
log.Printf("Kafka Consumer buffer is full, dropping a point." + log.Printf("Kafka Consumer buffer is full, dropping a metric." +
" You may want to increase the point_buffer setting") " You may want to increase the point_buffer setting")
} }
} }
@ -152,9 +151,9 @@ func (k *Kafka) Stop() {
func (k *Kafka) Gather(acc telegraf.Accumulator) error { func (k *Kafka) Gather(acc telegraf.Accumulator) error {
k.Lock() k.Lock()
defer k.Unlock() defer k.Unlock()
npoints := len(k.pointChan) npoints := len(k.metricC)
for i := 0; i < npoints; i++ { for i := 0; i < npoints; i++ {
point := <-k.pointChan point := <-k.metricC
acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time()) acc.AddFields(point.Name(), point.Fields(), point.Tags(), point.Time())
} }
return nil return nil
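Two details of the consumer are worth noting: metrics are parsed from the raw Kafka message with `telegraf.ParseMetrics`, and the parser uses a non-blocking send so a full buffer drops metrics instead of stalling the Kafka read loop. A standalone sketch of that select/default idiom (simplified types, illustrative names):

```go
package main

import (
	"fmt"
	"log"
)

func main() {
	metricC := make(chan string, 2) // stands in for k.metricC with point_buffer = 2

	for _, m := range []string{"m1", "m2", "m3"} {
		select {
		case metricC <- m:
			// buffered successfully
		default:
			// buffer full: drop the metric rather than block the reader
			log.Printf("buffer full, dropping %s", m)
		}
	}

	fmt.Println("buffered:", len(metricC)) // prints: buffered: 2
}
```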

View File

@ -51,13 +51,13 @@ func TestReadsMetricsFromKafka(t *testing.T) {
// Verify that we can now gather the sent message // Verify that we can now gather the sent message
var acc testutil.Accumulator var acc testutil.Accumulator
// Sanity check // Sanity check
assert.Equal(t, 0, len(acc.Points), "There should not be any points") assert.Equal(t, 0, len(acc.Metrics), "There should not be any points")
// Gather points // Gather points
err = k.Gather(&acc) err = k.Gather(&acc)
require.NoError(t, err) require.NoError(t, err)
if len(acc.Points) == 1 { if len(acc.Metrics) == 1 {
point := acc.Points[0] point := acc.Metrics[0]
assert.Equal(t, "cpu_load_short", point.Measurement) assert.Equal(t, "cpu_load_short", point.Measurement)
assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
assert.Equal(t, map[string]string{ assert.Equal(t, map[string]string{
@ -83,7 +83,7 @@ func waitForPoint(k *Kafka, t *testing.T) {
counter++ counter++
if counter > 1000 { if counter > 1000 {
t.Fatal("Waited for 5s, point never arrived to consumer") t.Fatal("Waited for 5s, point never arrived to consumer")
} else if len(k.pointChan) == 1 { } else if len(k.metricC) == 1 {
return return
} }
} }

View File

@ -4,7 +4,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/influxdata/influxdb/models" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
@ -29,7 +29,7 @@ func NewTestKafka() (*Kafka, chan *sarama.ConsumerMessage) {
doNotCommitMsgs: true, doNotCommitMsgs: true,
errs: make(chan *sarama.ConsumerError, pointBuffer), errs: make(chan *sarama.ConsumerError, pointBuffer),
done: make(chan struct{}), done: make(chan struct{}),
pointChan: make(chan models.Point, pointBuffer), metricC: make(chan telegraf.Metric, pointBuffer),
} }
return &k, in return &k, in
} }
@ -43,7 +43,7 @@ func TestRunParser(t *testing.T) {
in <- saramaMsg(testMsg) in <- saramaMsg(testMsg)
time.Sleep(time.Millisecond) time.Sleep(time.Millisecond)
assert.Equal(t, len(k.pointChan), 1) assert.Equal(t, len(k.metricC), 1)
} }
// Test that the parser ignores invalid messages // Test that the parser ignores invalid messages
@ -55,7 +55,7 @@ func TestRunParserInvalidMsg(t *testing.T) {
in <- saramaMsg(invalidMsg) in <- saramaMsg(invalidMsg)
time.Sleep(time.Millisecond) time.Sleep(time.Millisecond)
assert.Equal(t, len(k.pointChan), 0) assert.Equal(t, len(k.metricC), 0)
} }
// Test that points are dropped when we hit the buffer limit // Test that points are dropped when we hit the buffer limit
@ -69,7 +69,7 @@ func TestRunParserRespectsBuffer(t *testing.T) {
} }
time.Sleep(time.Millisecond) time.Sleep(time.Millisecond)
assert.Equal(t, len(k.pointChan), 5) assert.Equal(t, len(k.metricC), 5)
} }
// Test that the parser parses kafka messages into points // Test that the parser parses kafka messages into points
@ -84,7 +84,7 @@ func TestRunParserAndGather(t *testing.T) {
acc := testutil.Accumulator{} acc := testutil.Accumulator{}
k.Gather(&acc) k.Gather(&acc)
assert.Equal(t, len(acc.Points), 1) assert.Equal(t, len(acc.Metrics), 1)
acc.AssertContainsFields(t, "cpu_load_short", acc.AssertContainsFields(t, "cpu_load_short",
map[string]interface{}{"value": float64(23422)}) map[string]interface{}{"value": float64(23422)})
} }

View File

@ -113,7 +113,7 @@ func TestPostgresqlDefaultsToAllDatabases(t *testing.T) {
var found bool var found bool
for _, pnt := range acc.Points { for _, pnt := range acc.Metrics {
if pnt.Measurement == "postgresql" { if pnt.Measurement == "postgresql" {
if pnt.Tags["db"] == "postgres" { if pnt.Tags["db"] == "postgres" {
found = true found = true

View File

@ -123,7 +123,7 @@ func assertContainsTaggedFloat(
tags map[string]string, tags map[string]string,
) { ) {
var actualValue float64 var actualValue float64
for _, pt := range acc.Points { for _, pt := range acc.Metrics {
if pt.Measurement == measurement { if pt.Measurement == measurement {
for fieldname, value := range pt.Fields { for fieldname, value := range pt.Fields {
if fieldname == field { if fieldname == field {

View File

@ -57,9 +57,9 @@ func TestDiskStats(t *testing.T) {
err = (&DiskStats{ps: &mps}).Gather(&acc) err = (&DiskStats{ps: &mps}).Gather(&acc)
require.NoError(t, err) require.NoError(t, err)
numDiskPoints := acc.NFields() numDiskMetrics := acc.NFields()
expectedAllDiskPoints := 14 expectedAllDiskMetrics := 14
assert.Equal(t, expectedAllDiskPoints, numDiskPoints) assert.Equal(t, expectedAllDiskMetrics, numDiskMetrics)
tags1 := map[string]string{ tags1 := map[string]string{
"path": "/", "path": "/",
@ -91,15 +91,15 @@ func TestDiskStats(t *testing.T) {
acc.AssertContainsTaggedFields(t, "disk", fields1, tags1) acc.AssertContainsTaggedFields(t, "disk", fields1, tags1)
acc.AssertContainsTaggedFields(t, "disk", fields2, tags2) acc.AssertContainsTaggedFields(t, "disk", fields2, tags2)
// We expect 6 more DiskPoints to show up with an explicit match on "/" // We expect 6 more DiskMetrics to show up with an explicit match on "/"
// and /home not matching the /dev in MountPoints // and /home not matching the /dev in MountPoints
err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc) err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/dev"}}).Gather(&acc)
assert.Equal(t, expectedAllDiskPoints+7, acc.NFields()) assert.Equal(t, expectedAllDiskMetrics+7, acc.NFields())
// We should see all the diskpoints as MountPoints includes both // We should see all the diskpoints as MountPoints includes both
// / and /home // / and /home
err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc) err = (&DiskStats{ps: &mps, MountPoints: []string{"/", "/home"}}).Gather(&acc)
assert.Equal(t, 2*expectedAllDiskPoints+7, acc.NFields()) assert.Equal(t, 2*expectedAllDiskMetrics+7, acc.NFields())
} }
// func TestDiskIOStats(t *testing.T) { // func TestDiskIOStats(t *testing.T) {
@ -138,9 +138,9 @@ func TestDiskStats(t *testing.T) {
// err = (&DiskIOStats{ps: &mps}).Gather(&acc) // err = (&DiskIOStats{ps: &mps}).Gather(&acc)
// require.NoError(t, err) // require.NoError(t, err)
// numDiskIOPoints := acc.NFields() // numDiskIOMetrics := acc.NFields()
// expectedAllDiskIOPoints := 14 // expectedAllDiskIOMetrics := 14
// assert.Equal(t, expectedAllDiskIOPoints, numDiskIOPoints) // assert.Equal(t, expectedAllDiskIOMetrics, numDiskIOMetrics)
// dtags1 := map[string]string{ // dtags1 := map[string]string{
// "name": "sda1", // "name": "sda1",
@ -166,10 +166,10 @@ func TestDiskStats(t *testing.T) {
// assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2)) // assert.True(t, acc.CheckTaggedValue("write_time", uint64(6087), dtags2))
// assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2)) // assert.True(t, acc.CheckTaggedValue("io_time", uint64(246552), dtags2))
// // We expect 7 more DiskIOPoints to show up with an explicit match on "sdb1" // // We expect 7 more DiskIOMetrics to show up with an explicit match on "sdb1"
// // and serial should be missing from the tags with SkipSerialNumber set // // and serial should be missing from the tags with SkipSerialNumber set
// err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc) // err = (&DiskIOStats{ps: &mps, Devices: []string{"sdb1"}, SkipSerialNumber: true}).Gather(&acc)
// assert.Equal(t, expectedAllDiskIOPoints+7, acc.NFields()) // assert.Equal(t, expectedAllDiskIOMetrics+7, acc.NFields())
// dtags3 := map[string]string{ // dtags3 := map[string]string{
// "name": "sdb1", // "name": "sdb1",

View File

@ -55,7 +55,7 @@ func TestMemStats(t *testing.T) {
} }
acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string))
acc.Points = nil acc.Metrics = nil
err = (&SwapStats{&mps}).Gather(&acc) err = (&SwapStats{&mps}).Gather(&acc)
require.NoError(t, err) require.NoError(t, err)

View File

@ -85,7 +85,7 @@ func TestNetStats(t *testing.T) {
} }
acc.AssertContainsTaggedFields(t, "net", fields2, ntags) acc.AssertContainsTaggedFields(t, "net", fields2, ntags)
acc.Points = nil acc.Metrics = nil
err = (&NetStats{&mps}).Gather(&acc) err = (&NetStats{&mps}).Gather(&acc)
require.NoError(t, err) require.NoError(t, err)

View File

@ -148,7 +148,7 @@ func TestZfsPoolMetrics(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.False(t, acc.HasMeasurement("zfs_pool")) require.False(t, acc.HasMeasurement("zfs_pool"))
acc.Points = nil acc.Metrics = nil
z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true} z = &Zfs{KstatPath: testKstatPath, KstatMetrics: []string{"arcstats"}, PoolMetrics: true}
err = z.Gather(&acc) err = z.Gather(&acc)
@ -198,7 +198,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
acc.Points = nil acc.Metrics = nil
//two pools, all metrics //two pools, all metrics
err = os.MkdirAll(testKstatPath+"/STORAGE", 0755) err = os.MkdirAll(testKstatPath+"/STORAGE", 0755)
@ -217,7 +217,7 @@ func TestZfsGeneratesMetrics(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags)
acc.Points = nil acc.Metrics = nil
intMetrics = getKstatMetricsArcOnly() intMetrics = getKstatMetricsArcOnly()

View File

@ -8,10 +8,9 @@ import (
"net/http" "net/http"
"strings" "strings"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf"
) )
type Amon struct { type Amon struct {
@ -39,7 +38,7 @@ type TimeSeries struct {
type Metric struct { type Metric struct {
Metric string `json:"metric"` Metric string `json:"metric"`
Points [1]Point `json:"points"` Points [1]Point `json:"metrics"`
} }
type Point [2]float64 type Point [2]float64
@ -54,17 +53,17 @@ func (a *Amon) Connect() error {
return nil return nil
} }
func (a *Amon) Write(points []*client.Point) error { func (a *Amon) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
ts := TimeSeries{} ts := TimeSeries{}
tempSeries := []*Metric{} tempSeries := []*Metric{}
metricCounter := 0 metricCounter := 0
for _, pt := range points { for _, m := range metrics {
mname := strings.Replace(pt.Name(), "_", ".", -1) mname := strings.Replace(m.Name(), "_", ".", -1)
if amonPts, err := buildPoints(pt); err == nil { if amonPts, err := buildMetrics(m); err == nil {
for fieldName, amonPt := range amonPts { for fieldName, amonPt := range amonPts {
metric := &Metric{ metric := &Metric{
Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1), Metric: mname + "_" + strings.Replace(fieldName, "_", ".", -1),
@ -74,7 +73,7 @@ func (a *Amon) Write(points []*client.Point) error {
metricCounter++ metricCounter++
} }
} else { } else {
log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) log.Printf("unable to build Metric for %s, skipping\n", m.Name())
} }
} }
@ -116,17 +115,17 @@ func (a *Amon) authenticatedUrl() string {
return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey) return fmt.Sprintf("%s/api/system/%s", a.AmonInstance, a.ServerKey)
} }
func buildPoints(pt *client.Point) (map[string]Point, error) { func buildMetrics(m telegraf.Metric) (map[string]Point, error) {
pts := make(map[string]Point) ms := make(map[string]Point)
for k, v := range pt.Fields() { for k, v := range m.Fields() {
var p Point var p Point
if err := p.setValue(v); err != nil { if err := p.setValue(v); err != nil {
return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) return ms, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
} }
p[0] = float64(pt.Time().Unix()) p[0] = float64(m.Time().Unix())
pts[k] = p ms[k] = p
} }
return pts, nil return ms, nil
} }
func (p *Point) setValue(v interface{}) error { func (p *Point) setValue(v interface{}) error {

View File

@ -8,17 +8,17 @@ import (
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
) )
func TestBuildPoint(t *testing.T) { func TestBuildPoint(t *testing.T) {
var tagtests = []struct { var tagtests = []struct {
ptIn *client.Point ptIn telegraf.Metric
outPt Point outPt Point
err error err error
}{ }{
{ {
testutil.TestPoint(float64(0.0), "testpt"), testutil.TestMetric(float64(0.0), "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
0.0, 0.0,
@ -26,7 +26,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(float64(1.0), "testpt"), testutil.TestMetric(float64(1.0), "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
1.0, 1.0,
@ -34,7 +34,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int(10), "testpt"), testutil.TestMetric(int(10), "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
10.0, 10.0,
@ -42,7 +42,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int32(112345), "testpt"), testutil.TestMetric(int32(112345), "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
112345.0, 112345.0,
@ -50,7 +50,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int64(112345), "testpt"), testutil.TestMetric(int64(112345), "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
112345.0, 112345.0,
@ -58,7 +58,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(float32(11234.5), "testpt"), testutil.TestMetric(float32(11234.5), "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
11234.5, 11234.5,
@ -66,7 +66,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint("11234.5", "testpt"), testutil.TestMetric("11234.5", "testpt"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
11234.5, 11234.5,
@ -75,7 +75,7 @@ func TestBuildPoint(t *testing.T) {
}, },
} }
for _, tt := range tagtests { for _, tt := range tagtests {
pt, err := buildPoints(tt.ptIn) pt, err := buildMetrics(tt.ptIn)
if err != nil && tt.err == nil { if err != nil && tt.err == nil {
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err) t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
} }

View File

@ -10,9 +10,8 @@ import (
"sync" "sync"
"time" "time"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/streadway/amqp" "github.com/streadway/amqp"
) )
@ -150,17 +149,15 @@ func (q *AMQP) Description() string {
return "Configuration for the AMQP server to send metrics to" return "Configuration for the AMQP server to send metrics to"
} }
func (q *AMQP) Write(points []*client.Point) error { func (q *AMQP) Write(metrics []telegraf.Metric) error {
q.Lock() q.Lock()
defer q.Unlock() defer q.Unlock()
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
var outbuf = make(map[string][][]byte) var outbuf = make(map[string][][]byte)
for _, p := range points { for _, p := range metrics {
// Combine tags from Point and BatchPoints and grab the resulting
// line-protocol output string to write to AMQP
var value, key string var value, key string
value = p.String() value = p.String()

View File

@ -23,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Verify that we can successfully write data to the amqp broker // Verify that we can successfully write data to the amqp broker
err = q.Write(testutil.MockBatchPoints().Points()) err = q.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }

View File

@ -1,6 +1,6 @@
## Amazon CloudWatch Output for Telegraf ## Amazon CloudWatch Output for Telegraf
This plugin will send points to Amazon CloudWatch. This plugin will send metrics to Amazon CloudWatch.
## Amazon Authentication ## Amazon Authentication

View File

@ -14,9 +14,8 @@ import (
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
) )
type CloudWatch struct { type CloudWatch struct {
@ -73,9 +72,9 @@ func (c *CloudWatch) Close() error {
return nil return nil
} }
func (c *CloudWatch) Write(points []*client.Point) error { func (c *CloudWatch) Write(metrics []telegraf.Metric) error {
for _, pt := range points { for _, m := range metrics {
err := c.WriteSinglePoint(pt) err := c.WriteSinglePoint(m)
if err != nil { if err != nil {
return err return err
} }
@ -87,10 +86,10 @@ func (c *CloudWatch) Write(points []*client.Point) error {
// Write data for a single point. A point can have many fields and one field // Write data for a single point. A point can have many fields and one field
// is equal to one MetricDatum. There is a limit on how many MetricDatums a // is equal to one MetricDatum. There is a limit on how many MetricDatums a
// request can have so we process one Point at a time. // request can have so we process one Point at a time.
func (c *CloudWatch) WriteSinglePoint(point *client.Point) error { func (c *CloudWatch) WriteSinglePoint(point telegraf.Metric) error {
datums := BuildMetricDatum(point) datums := BuildMetricDatum(point)
const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data points per call const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data metrics per call
for _, partition := range PartitionDatums(maxDatumsPerCall, datums) { for _, partition := range PartitionDatums(maxDatumsPerCall, datums) {
err := c.WriteToCloudWatch(partition) err := c.WriteToCloudWatch(partition)
@ -144,7 +143,7 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch
// Make a MetricDatum for each field in a Point. Only fields with values that can be // Make a MetricDatum for each field in a Point. Only fields with values that can be
// converted to float64 are supported. Non-supported fields are skipped. // converted to float64 are supported. Non-supported fields are skipped.
func BuildMetricDatum(point *client.Point) []*cloudwatch.MetricDatum { func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum {
datums := make([]*cloudwatch.MetricDatum, len(point.Fields())) datums := make([]*cloudwatch.MetricDatum, len(point.Fields()))
i := 0 i := 0
@ -190,15 +189,15 @@ func BuildMetricDatum(point *client.Point) []*cloudwatch.MetricDatum {
// Make a list of Dimensions by using a Point's tags. CloudWatch supports up to // Make a list of Dimensions by using a Point's tags. CloudWatch supports up to
// 10 dimensions per metric so we only keep up to the first 10 alphabetically. // 10 dimensions per metric so we only keep up to the first 10 alphabetically.
// This always includes the "host" tag if it exists. // This always includes the "host" tag if it exists.
func BuildDimensions(ptTags map[string]string) []*cloudwatch.Dimension { func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension {
const MaxDimensions = 10 const MaxDimensions = 10
dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(ptTags)), MaxDimensions))) dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(mTags)), MaxDimensions)))
i := 0 i := 0
// This is pretty ugly but we always want to include the "host" tag if it exists. // This is pretty ugly but we always want to include the "host" tag if it exists.
if host, ok := ptTags["host"]; ok { if host, ok := mTags["host"]; ok {
dimensions[i] = &cloudwatch.Dimension{ dimensions[i] = &cloudwatch.Dimension{
Name: aws.String("host"), Name: aws.String("host"),
Value: aws.String(host), Value: aws.String(host),
@ -207,7 +206,7 @@ func BuildDimensions(ptTags map[string]string) []*cloudwatch.Dimension {
} }
var keys []string var keys []string
for k := range ptTags { for k := range mTags {
if k != "host" { if k != "host" {
keys = append(keys, k) keys = append(keys, k)
} }
@ -221,7 +220,7 @@ func BuildDimensions(ptTags map[string]string) []*cloudwatch.Dimension {
dimensions[i] = &cloudwatch.Dimension{ dimensions[i] = &cloudwatch.Dimension{
Name: aws.String(k), Name: aws.String(k),
Value: aws.String(ptTags[k]), Value: aws.String(mTags[k]),
} }
i += 1 i += 1
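
The comments above note that one field becomes one MetricDatum and that PutMetricData accepts only a limited number of datums per call (20 in this diff), so the datums are partitioned before writing. A minimal sketch of that partition step, using plain strings as stand-ins for *cloudwatch.MetricDatum values so the example stays dependency-free:

```go
package main

import "fmt"

// partition splits a slice into chunks of at most size elements, the same
// behaviour PartitionDatums provides for *cloudwatch.MetricDatum values.
func partition(size int, datums []string) [][]string {
	var chunks [][]string
	for start := 0; start < len(datums); start += size {
		end := start + size
		if end > len(datums) {
			end = len(datums)
		}
		chunks = append(chunks, datums[start:end])
	}
	return chunks
}

func main() {
	datums := make([]string, 45)
	for i := range datums {
		datums[i] = fmt.Sprintf("datum-%d", i)
	}
	for i, chunk := range partition(20, datums) {
		fmt.Printf("PutMetricData call %d would carry %d datums\n", i+1, len(chunk)) // 20, 20, 5
	}
}
```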


@ -7,7 +7,7 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -19,7 +19,7 @@ func TestBuildDimensions(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
testPoint := testutil.TestPoint(1) testPoint := testutil.TestMetric(1)
dimensions := BuildDimensions(testPoint.Tags()) dimensions := BuildDimensions(testPoint.Tags())
tagKeys := make([]string, len(testPoint.Tags())) tagKeys := make([]string, len(testPoint.Tags()))
@ -46,25 +46,25 @@ func TestBuildDimensions(t *testing.T) {
} }
} }
// Test that points with valid values have a MetricDatum created where as non valid do not. // Test that metrics with valid values have a MetricDatum created where as non valid do not.
// Skips "time.Time" type as something is converting the value to string. // Skips "time.Time" type as something is converting the value to string.
func TestBuildMetricDatums(t *testing.T) { func TestBuildMetricDatums(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
validPoints := []*client.Point{ validMetrics := []telegraf.Metric{
testutil.TestPoint(1), testutil.TestMetric(1),
testutil.TestPoint(int32(1)), testutil.TestMetric(int32(1)),
testutil.TestPoint(int64(1)), testutil.TestMetric(int64(1)),
testutil.TestPoint(float64(1)), testutil.TestMetric(float64(1)),
testutil.TestPoint(true), testutil.TestMetric(true),
} }
for _, point := range validPoints { for _, point := range validMetrics {
datums := BuildMetricDatum(point) datums := BuildMetricDatum(point)
assert.Equal(1, len(datums), "Valid type should create a Datum") assert.Equal(1, len(datums), "Valid type should create a Datum")
} }
nonValidPoint := testutil.TestPoint("Foo") nonValidPoint := testutil.TestMetric("Foo")
assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum") assert.Equal(0, len(BuildMetricDatum(nonValidPoint)), "Invalid type should not create a Datum")
} }


@ -10,10 +10,9 @@ import (
"sort" "sort"
"strings" "strings"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf"
) )
type Datadog struct { type Datadog struct {
@ -38,9 +37,9 @@ type TimeSeries struct {
type Metric struct { type Metric struct {
Metric string `json:"metric"` Metric string `json:"metric"`
Points [1]Point `json:"points"` Points [1]Point `json:"points"`
Host string `json:"host"` Host string `json:"host"`
Tags []string `json:"tags,omitempty"` Tags []string `json:"tags,omitempty"`
} }
type Point [2]float64 type Point [2]float64
@ -63,27 +62,29 @@ func (d *Datadog) Connect() error {
return nil return nil
} }
func (d *Datadog) Write(points []*client.Point) error { func (d *Datadog) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
ts := TimeSeries{} ts := TimeSeries{}
tempSeries := []*Metric{} tempSeries := []*Metric{}
metricCounter := 0 metricCounter := 0
for _, pt := range points { for _, m := range metrics {
mname := strings.Replace(pt.Name(), "_", ".", -1) mname := strings.Replace(m.Name(), "_", ".", -1)
if amonPts, err := buildPoints(pt); err == nil { if dogMs, err := buildMetrics(m); err == nil {
for fieldName, amonPt := range amonPts { for fieldName, dogM := range dogMs {
metric := &Metric{ metric := &Metric{
Metric: mname + strings.Replace(fieldName, "_", ".", -1), Metric: mname + strings.Replace(fieldName, "_", ".", -1),
Tags: buildTags(m.Tags()),
Host: m.Tags()["host"],
} }
metric.Points[0] = amonPt metric.Points[0] = dogM
tempSeries = append(tempSeries, metric) tempSeries = append(tempSeries, metric)
metricCounter++ metricCounter++
} }
} else { } else {
log.Printf("unable to build Metric for %s, skipping\n", pt.Name()) log.Printf("unable to build Metric for %s, skipping\n", m.Name())
} }
} }
@ -127,23 +128,23 @@ func (d *Datadog) authenticatedUrl() string {
return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode()) return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode())
} }
func buildPoints(pt *client.Point) (map[string]Point, error) { func buildMetrics(m telegraf.Metric) (map[string]Point, error) {
pts := make(map[string]Point) ms := make(map[string]Point)
for k, v := range pt.Fields() { for k, v := range m.Fields() {
var p Point var p Point
if err := p.setValue(v); err != nil { if err := p.setValue(v); err != nil {
return pts, fmt.Errorf("unable to extract value from Fields, %s", err.Error()) return ms, fmt.Errorf("unable to extract value from Fields, %s", err.Error())
} }
p[0] = float64(pt.Time().Unix()) p[0] = float64(m.Time().Unix())
pts[k] = p ms[k] = p
} }
return pts, nil return ms, nil
} }
func buildTags(ptTags map[string]string) []string { func buildTags(mTags map[string]string) []string {
tags := make([]string, len(ptTags)) tags := make([]string, len(mTags))
index := 0 index := 0
for k, v := range ptTags { for k, v := range mTags {
tags[index] = fmt.Sprintf("%s:%s", k, v) tags[index] = fmt.Sprintf("%s:%s", k, v)
index += 1 index += 1
} }
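
This hunk is the actual fix for #599: each Datadog Metric now carries Host and Tags built from the telegraf metric's tag map. A small sketch of the tag conversion mirrored from buildTags above; sorting is added only so the example output is deterministic and is not implied by the plugin.

```go
package main

import (
	"fmt"
	"sort"
)

// buildTags converts a telegraf tag map into the "key:value" strings the
// datadog output attaches to each Metric above.
func buildTags(mTags map[string]string) []string {
	tags := make([]string, 0, len(mTags))
	for k, v := range mTags {
		tags = append(tags, fmt.Sprintf("%s:%s", k, v))
	}
	sort.Strings(tags) // deterministic order for the example only
	return tags
}

func main() {
	fmt.Println(buildTags(map[string]string{"host": "server01", "region": "us-east-1"}))
	// Output: [host:server01 region:us-east-1]
}
```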


@ -11,7 +11,7 @@ import (
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -38,7 +38,7 @@ func TestUriOverride(t *testing.T) {
d.Apikey = "123456" d.Apikey = "123456"
err := d.Connect() err := d.Connect()
require.NoError(t, err) require.NoError(t, err)
err = d.Write(testutil.MockBatchPoints().Points()) err = d.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }
@ -57,7 +57,7 @@ func TestBadStatusCode(t *testing.T) {
d.Apikey = "123456" d.Apikey = "123456"
err := d.Connect() err := d.Connect()
require.NoError(t, err) require.NoError(t, err)
err = d.Write(testutil.MockBatchPoints().Points()) err = d.Write(testutil.MockMetrics())
if err == nil { if err == nil {
t.Errorf("error expected but none returned") t.Errorf("error expected but none returned")
} else { } else {
@ -100,12 +100,12 @@ func TestBuildTags(t *testing.T) {
func TestBuildPoint(t *testing.T) { func TestBuildPoint(t *testing.T) {
var tagtests = []struct { var tagtests = []struct {
ptIn *client.Point ptIn telegraf.Metric
outPt Point outPt Point
err error err error
}{ }{
{ {
testutil.TestPoint(0.0, "test1"), testutil.TestMetric(0.0, "test1"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
0.0, 0.0,
@ -113,7 +113,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(1.0, "test2"), testutil.TestMetric(1.0, "test2"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
1.0, 1.0,
@ -121,7 +121,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(10, "test3"), testutil.TestMetric(10, "test3"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
10.0, 10.0,
@ -129,7 +129,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int32(112345), "test4"), testutil.TestMetric(int32(112345), "test4"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
112345.0, 112345.0,
@ -137,7 +137,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int64(112345), "test5"), testutil.TestMetric(int64(112345), "test5"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
112345.0, 112345.0,
@ -145,7 +145,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(float32(11234.5), "test6"), testutil.TestMetric(float32(11234.5), "test6"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
11234.5, 11234.5,
@ -153,7 +153,7 @@ func TestBuildPoint(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint("11234.5", "test7"), testutil.TestMetric("11234.5", "test7"),
Point{ Point{
float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()), float64(time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix()),
11234.5, 11234.5,
@ -162,7 +162,7 @@ func TestBuildPoint(t *testing.T) {
}, },
} }
for _, tt := range tagtests { for _, tt := range tagtests {
pt, err := buildPoints(tt.ptIn) pt, err := buildMetrics(tt.ptIn)
if err != nil && tt.err == nil { if err != nil && tt.err == nil {
t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err) t.Errorf("%s: unexpected error, %+v\n", tt.ptIn.Name(), err)
} }


@ -3,9 +3,8 @@ package graphite
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"log" "log"
"math/rand" "math/rand"
"net" "net"
@ -68,10 +67,10 @@ func (g *Graphite) Description() string {
// Choose a random server in the cluster to write to until a successful write // Choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful. If all servers fail, return error. // occurs, logging each unsuccessful. If all servers fail, return error.
func (g *Graphite) Write(points []*client.Point) error { func (g *Graphite) Write(metrics []telegraf.Metric) error {
// Prepare data // Prepare data
var bp []string var bp []string
for _, point := range points { for _, point := range metrics {
// Get name // Get name
name := point.Name() name := point.Name()
// Convert UnixNano to Unix timestamps // Convert UnixNano to Unix timestamps
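
The Write above walks each metric, takes its name, tags, fields, and time, and emits one Graphite line per field. The sketch below shows roughly that formatting on primitive inputs; the exact bucket naming and prefix handling in the real plugin may differ, so treat it as an illustration only.

```go
package main

import (
	"fmt"
	"time"
)

// graphiteLines renders one "name value unix_timestamp" line per field,
// prefixing the configured namespace and joining the metric name and field
// name with an underscore. This approximates the lines assembled above.
func graphiteLines(prefix, name string, fields map[string]interface{}, t time.Time) []string {
	var lines []string
	for field, value := range fields {
		lines = append(lines, fmt.Sprintf("%s.%s_%s %v %d", prefix, name, field, value, t.Unix()))
	}
	return lines
}

func main() {
	ts := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)
	for _, line := range graphiteLines("my.prefix", "mymeasurement", map[string]interface{}{"value": 3.14}, ts) {
		fmt.Println(line) // my.prefix.mymeasurement_value 3.14 1289430000
	}
}
```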


@ -8,7 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -20,20 +20,20 @@ func TestGraphiteError(t *testing.T) {
Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"}, Servers: []string{"127.0.0.1:2003", "127.0.0.1:12003"},
Prefix: "my.prefix", Prefix: "my.prefix",
} }
// Init points // Init metrics
pt1, _ := client.NewPoint( pt1, _ := telegraf.NewMetric(
"mymeasurement", "mymeasurement",
map[string]string{"host": "192.168.0.1"}, map[string]string{"host": "192.168.0.1"},
map[string]interface{}{"mymeasurement": float64(3.14)}, map[string]interface{}{"mymeasurement": float64(3.14)},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
) )
// Prepare point list // Prepare point list
var points []*client.Point var metrics []telegraf.Metric
points = append(points, pt1) metrics = append(metrics, pt1)
// Error // Error
err1 := g.Connect() err1 := g.Connect()
require.NoError(t, err1) require.NoError(t, err1)
err2 := g.Write(points) err2 := g.Write(metrics)
require.Error(t, err2) require.Error(t, err2)
assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error()) assert.Equal(t, "Could not write to any Graphite server in cluster\n", err2.Error())
} }
@ -44,30 +44,30 @@ func TestGraphiteOK(t *testing.T) {
g := Graphite{ g := Graphite{
Prefix: "my.prefix", Prefix: "my.prefix",
} }
// Init points // Init metrics
pt1, _ := client.NewPoint( pt1, _ := telegraf.NewMetric(
"mymeasurement", "mymeasurement",
map[string]string{"host": "192.168.0.1"}, map[string]string{"host": "192.168.0.1"},
map[string]interface{}{"mymeasurement": float64(3.14)}, map[string]interface{}{"mymeasurement": float64(3.14)},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
) )
pt2, _ := client.NewPoint( pt2, _ := telegraf.NewMetric(
"mymeasurement", "mymeasurement",
map[string]string{"host": "192.168.0.1"}, map[string]string{"host": "192.168.0.1"},
map[string]interface{}{"value": float64(3.14)}, map[string]interface{}{"value": float64(3.14)},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
) )
pt3, _ := client.NewPoint( pt3, _ := telegraf.NewMetric(
"my_measurement", "my_measurement",
map[string]string{"host": "192.168.0.1"}, map[string]string{"host": "192.168.0.1"},
map[string]interface{}{"value": float64(3.14)}, map[string]interface{}{"value": float64(3.14)},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
) )
// Prepare point list // Prepare point list
var points []*client.Point var metrics []telegraf.Metric
points = append(points, pt1) metrics = append(metrics, pt1)
points = append(points, pt2) metrics = append(metrics, pt2)
points = append(points, pt3) metrics = append(metrics, pt3)
// Start TCP server // Start TCP server
wg.Add(1) wg.Add(1)
go TCPServer(t, &wg) go TCPServer(t, &wg)
@ -78,7 +78,7 @@ func TestGraphiteOK(t *testing.T) {
wg.Wait() wg.Wait()
require.NoError(t, err1) require.NoError(t, err1)
// Send Data // Send Data
err2 := g.Write(points) err2 := g.Write(metrics)
require.NoError(t, err2) require.NoError(t, err2)
wg.Add(1) wg.Add(1)
// Waiting TCPserver // Waiting TCPserver


@ -9,10 +9,11 @@ import (
"strings" "strings"
"time" "time"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf"
"github.com/influxdata/influxdb/client/v2"
) )
type InfluxDB struct { type InfluxDB struct {
@ -131,14 +132,14 @@ func (i *InfluxDB) Description() string {
// Choose a random server in the cluster to write to until a successful write // Choose a random server in the cluster to write to until a successful write
// occurs, logging each unsuccessful. If all servers fail, return error. // occurs, logging each unsuccessful. If all servers fail, return error.
func (i *InfluxDB) Write(points []*client.Point) error { func (i *InfluxDB) Write(metrics []telegraf.Metric) error {
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{ bp, _ := client.NewBatchPoints(client.BatchPointsConfig{
Database: i.Database, Database: i.Database,
Precision: i.Precision, Precision: i.Precision,
}) })
for _, point := range points { for _, metric := range metrics {
bp.AddPoint(point) bp.AddPoint(metric.Point())
} }
// This will get set to nil if a successful write occurs // This will get set to nil if a successful write occurs
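
Unlike the other outputs, the InfluxDB output still needs client.Point values on the wire, so the new Write converts each telegraf.Metric back with the Point() accessor shown in this hunk. A sketch of that bridge, using only the calls visible in the diff:

```go
package main

import (
	"github.com/influxdata/influxdb/client/v2"
	"github.com/influxdata/telegraf"
)

// toBatch converts internal telegraf metrics into an InfluxDB BatchPoints,
// the same bridge the Write above performs before sending. It relies on the
// Metric.Point() accessor visible in this hunk.
func toBatch(metrics []telegraf.Metric, database, precision string) (client.BatchPoints, error) {
	bp, err := client.NewBatchPoints(client.BatchPointsConfig{
		Database:  database,
		Precision: precision,
	})
	if err != nil {
		return nil, err
	}
	for _, m := range metrics {
		bp.AddPoint(m.Point())
	}
	return bp, nil
}
```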


@ -18,7 +18,7 @@ func TestUDPInflux(t *testing.T) {
err := i.Connect() err := i.Connect()
require.NoError(t, err) require.NoError(t, err)
err = i.Write(testutil.MockBatchPoints().Points()) err = i.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }
@ -36,6 +36,6 @@ func TestHTTPInflux(t *testing.T) {
err := i.Connect() err := i.Connect()
require.NoError(t, err) require.NoError(t, err)
err = i.Write(testutil.MockBatchPoints().Points()) err = i.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }


@ -6,9 +6,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/Shopify/sarama" "github.com/Shopify/sarama"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"io/ioutil" "io/ioutil"
) )
@ -113,14 +112,12 @@ func (k *Kafka) Description() string {
return "Configuration for the Kafka server to send metrics to" return "Configuration for the Kafka server to send metrics to"
} }
func (k *Kafka) Write(points []*client.Point) error { func (k *Kafka) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
for _, p := range points { for _, p := range metrics {
// Combine tags from Point and BatchPoints and grab the resulting
// line-protocol output string to write to Kafka
value := p.String() value := p.String()
m := &sarama.ProducerMessage{ m := &sarama.ProducerMessage{


@ -23,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Verify that we can successfully write data to the kafka broker // Verify that we can successfully write data to the kafka broker
err = k.Write(testutil.MockBatchPoints().Points()) err = k.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }


@ -14,9 +14,8 @@ import (
"github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
) )
type KinesisOutput struct { type KinesisOutput struct {
@ -104,7 +103,7 @@ func (k *KinesisOutput) Close() error {
return nil return nil
} }
func FormatMetric(k *KinesisOutput, point *client.Point) (string, error) { func FormatMetric(k *KinesisOutput, point telegraf.Metric) (string, error) {
if k.Format == "string" { if k.Format == "string" {
return point.String(), nil return point.String(), nil
} else { } else {
@ -139,16 +138,16 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du
return time.Since(start) return time.Since(start)
} }
func (k *KinesisOutput) Write(points []*client.Point) error { func (k *KinesisOutput) Write(metrics []telegraf.Metric) error {
var sz uint32 = 0 var sz uint32 = 0
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
r := []*kinesis.PutRecordsRequestEntry{} r := []*kinesis.PutRecordsRequestEntry{}
for _, p := range points { for _, p := range metrics {
atomic.AddUint32(&sz, 1) atomic.AddUint32(&sz, 1)
metric, _ := FormatMetric(k, p) metric, _ := FormatMetric(k, p)
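
FormatMetric above chooses a serialization based on the configured Format, with "string" meaning InfluxDB line protocol via String(). A sketch of that dispatch; any other formats the plugin supports fall outside the hunk shown here, so the default branch simply errors.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf"
)

// formatMetric mirrors the dispatch in FormatMetric above: the "string"
// format serializes a metric as InfluxDB line protocol via String().
func formatMetric(format string, m telegraf.Metric) (string, error) {
	switch format {
	case "string":
		return m.String(), nil
	default:
		return "", fmt.Errorf("unsupported format %q in this sketch", format)
	}
}
```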


@ -15,7 +15,7 @@ func TestFormatMetric(t *testing.T) {
Format: "string", Format: "string",
} }
p := testutil.MockBatchPoints().Points()[0] p := testutil.MockMetrics()[0]
valid_string := "test1,tag1=value1 value=1 1257894000000000000" valid_string := "test1,tag1=value1 value=1 1257894000000000000"
func_string, err := FormatMetric(k, p) func_string, err := FormatMetric(k, p)


@ -7,10 +7,9 @@ import (
"log" "log"
"net/http" "net/http"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf"
) )
type Librato struct { type Librato struct {
@ -41,7 +40,7 @@ var sampleConfig = `
# timeout = "5s" # timeout = "5s"
` `
type Metrics struct { type LMetrics struct {
Gauges []*Gauge `json:"gauges"` Gauges []*Gauge `json:"gauges"`
} }
@ -70,27 +69,27 @@ func (l *Librato) Connect() error {
return nil return nil
} }
func (l *Librato) Write(points []*client.Point) error { func (l *Librato) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
metrics := Metrics{} lmetrics := LMetrics{}
tempGauges := []*Gauge{} tempGauges := []*Gauge{}
metricCounter := 0 metricCounter := 0
for _, pt := range points { for _, m := range metrics {
if gauges, err := l.buildGauges(pt); err == nil { if gauges, err := l.buildGauges(m); err == nil {
for _, gauge := range gauges { for _, gauge := range gauges {
tempGauges = append(tempGauges, gauge) tempGauges = append(tempGauges, gauge)
metricCounter++ metricCounter++
} }
} else { } else {
log.Printf("unable to build Gauge for %s, skipping\n", pt.Name()) log.Printf("unable to build Gauge for %s, skipping\n", m.Name())
} }
} }
metrics.Gauges = make([]*Gauge, metricCounter) lmetrics.Gauges = make([]*Gauge, metricCounter)
copy(metrics.Gauges, tempGauges[0:]) copy(lmetrics.Gauges, tempGauges[0:])
metricsBytes, err := json.Marshal(metrics) metricsBytes, err := json.Marshal(metrics)
if err != nil { if err != nil {
return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error()) return fmt.Errorf("unable to marshal Metrics, %s\n", err.Error())
@ -123,19 +122,19 @@ func (l *Librato) Description() string {
return "Configuration for Librato API to send metrics to." return "Configuration for Librato API to send metrics to."
} }
func (l *Librato) buildGauges(pt *client.Point) ([]*Gauge, error) { func (l *Librato) buildGauges(m telegraf.Metric) ([]*Gauge, error) {
gauges := []*Gauge{} gauges := []*Gauge{}
for fieldName, value := range pt.Fields() { for fieldName, value := range m.Fields() {
gauge := &Gauge{ gauge := &Gauge{
Name: pt.Name() + "_" + fieldName, Name: m.Name() + "_" + fieldName,
MeasureTime: pt.Time().Unix(), MeasureTime: m.Time().Unix(),
} }
if err := gauge.setValue(value); err != nil { if err := gauge.setValue(value); err != nil {
return gauges, fmt.Errorf("unable to extract value from Fields, %s\n", return gauges, fmt.Errorf("unable to extract value from Fields, %s\n",
err.Error()) err.Error())
} }
if l.SourceTag != "" { if l.SourceTag != "" {
if source, ok := pt.Tags()[l.SourceTag]; ok { if source, ok := m.Tags()[l.SourceTag]; ok {
gauge.Source = source gauge.Source = source
} else { } else {
return gauges, return gauges,
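
buildGauges above fans each field of a metric out into one Librato gauge named "<metric>_<field>" and stamped with the metric's Unix time. A trimmed sketch of that fan-out; the real setValue also converts integer and string values, which is omitted here.

```go
package main

import (
	"fmt"
	"time"
)

// Gauge is a trimmed stand-in for the Librato gauge assembled above.
type Gauge struct {
	Name        string
	MeasureTime int64
	Value       float64
}

// buildGauges creates one gauge per field, named "<metric>_<field>" and
// stamped with the metric's Unix time. Only float64 fields are handled here.
func buildGauges(name string, fields map[string]interface{}, t time.Time) ([]Gauge, error) {
	var gauges []Gauge
	for field, value := range fields {
		v, ok := value.(float64)
		if !ok {
			return nil, fmt.Errorf("field %s: unsupported type %T in this sketch", field, value)
		}
		gauges = append(gauges, Gauge{
			Name:        name + "_" + field,
			MeasureTime: t.Unix(),
			Value:       v,
		})
	}
	return gauges, nil
}

func main() {
	gs, _ := buildGauges("test1", map[string]interface{}{"value": 0.0}, time.Now())
	fmt.Printf("%+v\n", gs[0])
}
```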


@ -11,7 +11,7 @@ import (
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -39,7 +39,7 @@ func TestUriOverride(t *testing.T) {
l.ApiToken = "123456" l.ApiToken = "123456"
err := l.Connect() err := l.Connect()
require.NoError(t, err) require.NoError(t, err)
err = l.Write(testutil.MockBatchPoints().Points()) err = l.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }
@ -61,7 +61,7 @@ func TestBadStatusCode(t *testing.T) {
l.ApiToken = "123456" l.ApiToken = "123456"
err := l.Connect() err := l.Connect()
require.NoError(t, err) require.NoError(t, err)
err = l.Write(testutil.MockBatchPoints().Points()) err = l.Write(testutil.MockMetrics())
if err == nil { if err == nil {
t.Errorf("error expected but none returned") t.Errorf("error expected but none returned")
} else { } else {
@ -71,12 +71,12 @@ func TestBadStatusCode(t *testing.T) {
func TestBuildGauge(t *testing.T) { func TestBuildGauge(t *testing.T) {
var gaugeTests = []struct { var gaugeTests = []struct {
ptIn *client.Point ptIn telegraf.Metric
outGauge *Gauge outGauge *Gauge
err error err error
}{ }{
{ {
testutil.TestPoint(0.0, "test1"), testutil.TestMetric(0.0, "test1"),
&Gauge{ &Gauge{
Name: "test1", Name: "test1",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -85,7 +85,7 @@ func TestBuildGauge(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(1.0, "test2"), testutil.TestMetric(1.0, "test2"),
&Gauge{ &Gauge{
Name: "test2", Name: "test2",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -94,7 +94,7 @@ func TestBuildGauge(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(10, "test3"), testutil.TestMetric(10, "test3"),
&Gauge{ &Gauge{
Name: "test3", Name: "test3",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -103,7 +103,7 @@ func TestBuildGauge(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int32(112345), "test4"), testutil.TestMetric(int32(112345), "test4"),
&Gauge{ &Gauge{
Name: "test4", Name: "test4",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -112,7 +112,7 @@ func TestBuildGauge(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(int64(112345), "test5"), testutil.TestMetric(int64(112345), "test5"),
&Gauge{ &Gauge{
Name: "test5", Name: "test5",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -121,7 +121,7 @@ func TestBuildGauge(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint(float32(11234.5), "test6"), testutil.TestMetric(float32(11234.5), "test6"),
&Gauge{ &Gauge{
Name: "test6", Name: "test6",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -130,7 +130,7 @@ func TestBuildGauge(t *testing.T) {
nil, nil,
}, },
{ {
testutil.TestPoint("11234.5", "test7"), testutil.TestMetric("11234.5", "test7"),
&Gauge{ &Gauge{
Name: "test7", Name: "test7",
MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(), MeasureTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC).Unix(),
@ -161,20 +161,20 @@ func TestBuildGauge(t *testing.T) {
} }
func TestBuildGaugeWithSource(t *testing.T) { func TestBuildGaugeWithSource(t *testing.T) {
pt1, _ := client.NewPoint( pt1, _ := telegraf.NewMetric(
"test1", "test1",
map[string]string{"hostname": "192.168.0.1"}, map[string]string{"hostname": "192.168.0.1"},
map[string]interface{}{"value": 0.0}, map[string]interface{}{"value": 0.0},
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC),
) )
pt2, _ := client.NewPoint( pt2, _ := telegraf.NewMetric(
"test2", "test2",
map[string]string{"hostnam": "192.168.0.1"}, map[string]string{"hostnam": "192.168.0.1"},
map[string]interface{}{"value": 1.0}, map[string]interface{}{"value": 1.0},
time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC), time.Date(2010, time.December, 10, 23, 0, 0, 0, time.UTC),
) )
var gaugeTests = []struct { var gaugeTests = []struct {
ptIn *client.Point ptIn telegraf.Metric
outGauge *Gauge outGauge *Gauge
err error err error
}{ }{


@ -10,10 +10,9 @@ import (
"sync" "sync"
paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" paho "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf"
) )
const MaxClientIdLen = 8 const MaxClientIdLen = 8
@ -79,18 +78,18 @@ func (m *MQTT) Description() string {
return "Configuration for MQTT server to send metrics to" return "Configuration for MQTT server to send metrics to"
} }
func (m *MQTT) Write(points []*client.Point) error { func (m *MQTT) Write(metrics []telegraf.Metric) error {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
hostname, ok := points[0].Tags()["host"] hostname, ok := metrics[0].Tags()["host"]
if !ok { if !ok {
hostname = "" hostname = ""
} }
for _, p := range points { for _, p := range metrics {
var t []string var t []string
if m.TopicPrefix != "" { if m.TopicPrefix != "" {
t = append(t, m.TopicPrefix) t = append(t, m.TopicPrefix)
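
The Write above derives the publish topic from the optional TopicPrefix plus the metric's host tag and name. Only the prefix handling is visible in this hunk, so the remaining topic segments in the sketch below are an assumption for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// buildTopic joins an optional prefix, the host tag, and the metric name
// into a "/"-separated MQTT topic. Only the prefix handling appears in the
// hunk above; the remaining segments are assumed for illustration.
func buildTopic(prefix, hostname, metricName string) string {
	var parts []string
	if prefix != "" {
		parts = append(parts, prefix)
	}
	if hostname != "" {
		parts = append(parts, hostname)
	}
	parts = append(parts, metricName)
	return strings.Join(parts, "/")
}

func main() {
	fmt.Println(buildTopic("telegraf", "server01", "cpu")) // telegraf/server01/cpu
}
```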


@ -22,6 +22,6 @@ func TestConnectAndWrite(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Verify that we can successfully write data to the mqtt broker // Verify that we can successfully write data to the mqtt broker
err = m.Write(testutil.MockBatchPoints().Points()) err = m.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }


@ -2,9 +2,8 @@ package nsq
import ( import (
"fmt" "fmt"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/nsqio/go-nsq" "github.com/nsqio/go-nsq"
) )
@ -46,14 +45,12 @@ func (n *NSQ) Description() string {
return "Send telegraf measurements to NSQD" return "Send telegraf measurements to NSQD"
} }
func (n *NSQ) Write(points []*client.Point) error { func (n *NSQ) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
for _, p := range points { for _, p := range metrics {
// Combine tags from Point and BatchPoints and grab the resulting
// line-protocol output string to write to NSQ
value := p.String() value := p.String()
err := n.producer.Publish(n.Topic, []byte(value)) err := n.producer.Publish(n.Topic, []byte(value))


@ -23,6 +23,6 @@ func TestConnectAndWrite(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Verify that we can successfully write data to the NSQ daemon // Verify that we can successfully write data to the NSQ daemon
err = n.Write(testutil.MockBatchPoints().Points()) err = n.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }


@ -8,9 +8,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
) )
type OpenTSDB struct { type OpenTSDB struct {
@ -59,8 +58,8 @@ func (o *OpenTSDB) Connect() error {
return nil return nil
} }
func (o *OpenTSDB) Write(points []*client.Point) error { func (o *OpenTSDB) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
now := time.Now() now := time.Now()
@ -74,8 +73,8 @@ func (o *OpenTSDB) Write(points []*client.Point) error {
} }
defer connection.Close() defer connection.Close()
for _, pt := range points { for _, m := range metrics {
for _, metric := range buildMetrics(pt, now, o.Prefix) { for _, metric := range buildMetrics(m, now, o.Prefix) {
messageLine := fmt.Sprintf("put %s %v %s %s\n", messageLine := fmt.Sprintf("put %s %v %s %s\n",
metric.Metric, metric.Timestamp, metric.Value, metric.Tags) metric.Metric, metric.Timestamp, metric.Value, metric.Tags)
if o.Debug { if o.Debug {
@ -91,10 +90,10 @@ func (o *OpenTSDB) Write(points []*client.Point) error {
return nil return nil
} }
func buildTags(ptTags map[string]string) []string { func buildTags(mTags map[string]string) []string {
tags := make([]string, len(ptTags)) tags := make([]string, len(mTags))
index := 0 index := 0
for k, v := range ptTags { for k, v := range mTags {
tags[index] = fmt.Sprintf("%s=%s", k, v) tags[index] = fmt.Sprintf("%s=%s", k, v)
index += 1 index += 1
} }
@ -102,11 +101,11 @@ func buildTags(ptTags map[string]string) []string {
return tags return tags
} }
func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine { func buildMetrics(m telegraf.Metric, now time.Time, prefix string) []*MetricLine {
ret := []*MetricLine{} ret := []*MetricLine{}
for fieldName, value := range pt.Fields() { for fieldName, value := range m.Fields() {
metric := &MetricLine{ metric := &MetricLine{
Metric: fmt.Sprintf("%s%s_%s", prefix, pt.Name(), fieldName), Metric: fmt.Sprintf("%s%s_%s", prefix, m.Name(), fieldName),
Timestamp: now.Unix(), Timestamp: now.Unix(),
} }
@ -116,7 +115,7 @@ func buildMetrics(pt *client.Point, now time.Time, prefix string) []*MetricLine
continue continue
} }
metric.Value = metricValue metric.Value = metricValue
tagsSlice := buildTags(pt.Tags()) tagsSlice := buildTags(m.Tags())
metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " ")) metric.Tags = fmt.Sprint(strings.Join(tagsSlice, " "))
ret = append(ret, metric) ret = append(ret, metric)
} }
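
buildMetrics and Write above combine into one telnet-style "put" line per field: metric name, timestamp, value, then space-separated tags. A sketch of that line format on primitive inputs:

```go
package main

import (
	"fmt"
	"time"
)

// opentsdbLine formats the telnet-style message written above:
// "put <prefix><name>_<field> <unix_ts> <value> <tag1=v1 tag2=v2 ...>".
func opentsdbLine(prefix, name, field string, value interface{}, tags string, t time.Time) string {
	return fmt.Sprintf("put %s%s_%s %d %v %s", prefix, name, field, t.Unix(), value, tags)
}

func main() {
	ts := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	fmt.Println(opentsdbLine("pre.", "cpu", "usage_idle", 92.3, "host=server01 region=us-east-1", ts))
	// put pre.cpu_usage_idle 1257894000 92.3 host=server01 region=us-east-1
}
```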


@ -54,18 +54,18 @@ func TestWrite(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
// Verify that we can successfully write data to OpenTSDB // Verify that we can successfully write data to OpenTSDB
err = o.Write(testutil.MockBatchPoints().Points()) err = o.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
// Verify postive and negative test cases of writing data // Verify postive and negative test cases of writing data
bp := testutil.MockBatchPoints() metrics := testutil.MockMetrics()
bp.AddPoint(testutil.TestPoint(float64(1.0), "justametric.float")) metrics = append(metrics, testutil.TestMetric(float64(1.0), "justametric.float"))
bp.AddPoint(testutil.TestPoint(int64(123456789), "justametric.int")) metrics = append(metrics, testutil.TestMetric(int64(123456789), "justametric.int"))
bp.AddPoint(testutil.TestPoint(uint64(123456789012345), "justametric.uint")) metrics = append(metrics, testutil.TestMetric(uint64(123456789012345), "justametric.uint"))
bp.AddPoint(testutil.TestPoint("Lorem Ipsum", "justametric.string")) metrics = append(metrics, testutil.TestMetric("Lorem Ipsum", "justametric.string"))
bp.AddPoint(testutil.TestPoint(float64(42.0), "justametric.anotherfloat")) metrics = append(metrics, testutil.TestMetric(float64(42.0), "justametric.anotherfloat"))
err = o.Write(bp.Points()) err = o.Write(metrics)
require.NoError(t, err) require.NoError(t, err)
} }


@ -5,9 +5,8 @@ import (
"log" "log"
"net/http" "net/http"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
@ -59,12 +58,12 @@ func (p *PrometheusClient) Description() string {
return "Configuration for the Prometheus client to spawn" return "Configuration for the Prometheus client to spawn"
} }
func (p *PrometheusClient) Write(points []*client.Point) error { func (p *PrometheusClient) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
for _, point := range points { for _, point := range metrics {
var labels []string var labels []string
key := point.Name() key := point.Name()
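
The loop above starts by collecting a metric's tag keys into a labels slice so they can become Prometheus label names keyed by the metric name. A sketch of that extraction; sorting is only for deterministic output and is not implied by the plugin.

```go
package main

import (
	"fmt"
	"sort"
)

// labelNames gathers a metric's tag keys so they can serve as Prometheus
// label names, as the labels slice above begins to do.
func labelNames(tags map[string]string) []string {
	var labels []string
	for k := range tags {
		labels = append(labels, k)
	}
	sort.Strings(labels)
	return labels
}

func main() {
	fmt.Println(labelNames(map[string]string{"host": "server01", "cpu": "cpu0"}))
	// Output: [cpu host]
}
```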


@ -5,7 +5,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs/prometheus" "github.com/influxdata/telegraf/plugins/inputs/prometheus"
"github.com/influxdata/telegraf/testutil" "github.com/influxdata/telegraf/testutil"
) )
@ -21,19 +21,19 @@ func TestPrometheusWritePointEmptyTag(t *testing.T) {
Urls: []string{"http://localhost:9126/metrics"}, Urls: []string{"http://localhost:9126/metrics"},
} }
tags := make(map[string]string) tags := make(map[string]string)
pt1, _ := client.NewPoint( pt1, _ := telegraf.NewMetric(
"test_point_1", "test_point_1",
tags, tags,
map[string]interface{}{"value": 0.0}) map[string]interface{}{"value": 0.0})
pt2, _ := client.NewPoint( pt2, _ := telegraf.NewMetric(
"test_point_2", "test_point_2",
tags, tags,
map[string]interface{}{"value": 1.0}) map[string]interface{}{"value": 1.0})
var points = []*client.Point{ var metrics = []telegraf.Metric{
pt1, pt1,
pt2, pt2,
} }
require.NoError(t, pTesting.Write(points)) require.NoError(t, pTesting.Write(metrics))
expected := []struct { expected := []struct {
name string name string
@ -63,19 +63,19 @@ func TestPrometheusWritePointTag(t *testing.T) {
} }
tags := make(map[string]string) tags := make(map[string]string)
tags["testtag"] = "testvalue" tags["testtag"] = "testvalue"
pt1, _ := client.NewPoint( pt1, _ := telegraf.NewMetric(
"test_point_3", "test_point_3",
tags, tags,
map[string]interface{}{"value": 0.0}) map[string]interface{}{"value": 0.0})
pt2, _ := client.NewPoint( pt2, _ := telegraf.NewMetric(
"test_point_4", "test_point_4",
tags, tags,
map[string]interface{}{"value": 1.0}) map[string]interface{}{"value": 1.0})
var points = []*client.Point{ var metrics = []telegraf.Metric{
pt1, pt1,
pt2, pt2,
} }
require.NoError(t, pTesting.Write(points)) require.NoError(t, pTesting.Write(metrics))
expected := []struct { expected := []struct {
name string name string


@ -6,9 +6,8 @@ import (
"os" "os"
"github.com/amir/raidman" "github.com/amir/raidman"
"github.com/influxdata/influxdb/client/v2"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/outputs"
) )
type Riemann struct { type Riemann struct {
@ -49,13 +48,13 @@ func (r *Riemann) Description() string {
return "Configuration for the Riemann server to send metrics to" return "Configuration for the Riemann server to send metrics to"
} }
func (r *Riemann) Write(points []*client.Point) error { func (r *Riemann) Write(metrics []telegraf.Metric) error {
if len(points) == 0 { if len(metrics) == 0 {
return nil return nil
} }
var events []*raidman.Event var events []*raidman.Event
for _, p := range points { for _, p := range metrics {
evs := buildEvents(p) evs := buildEvents(p)
for _, ev := range evs { for _, ev := range evs {
events = append(events, ev) events = append(events, ev)
@ -71,7 +70,7 @@ func (r *Riemann) Write(points []*client.Point) error {
return nil return nil
} }
func buildEvents(p *client.Point) []*raidman.Event { func buildEvents(p telegraf.Metric) []*raidman.Event {
events := []*raidman.Event{} events := []*raidman.Event{}
for fieldName, value := range p.Fields() { for fieldName, value := range p.Fields() {
host, ok := p.Tags()["host"] host, ok := p.Tags()["host"]
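
buildEvents above creates one Riemann event per field, pulling the host from the metric's tags. The sketch below uses a trimmed local Event struct rather than importing raidman, and the "<name>_<field>" service naming is an assumption since the rest of the function is outside this hunk.

```go
package main

import "fmt"

// Event is a trimmed local stand-in for raidman.Event carrying only the
// fields this sketch populates.
type Event struct {
	Host    string
	Service string
	Metric  interface{}
}

// buildEvents creates one event per field, pulling the host from the tag map
// the way the loop above does.
func buildEvents(name string, tags map[string]string, fields map[string]interface{}) []*Event {
	host := tags["host"]
	var events []*Event
	for field, value := range fields {
		events = append(events, &Event{
			Host:    host,
			Service: name + "_" + field,
			Metric:  value,
		})
	}
	return events
}

func main() {
	evs := buildEvents("cpu", map[string]string{"host": "server01"}, map[string]interface{}{"usage_idle": 92.3})
	fmt.Println(len(evs), evs[0].Service) // 1 cpu_usage_idle
}
```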


@ -22,6 +22,6 @@ func TestConnectAndWrite(t *testing.T) {
err := r.Connect() err := r.Connect()
require.NoError(t, err) require.NoError(t, err)
err = r.Write(testutil.MockBatchPoints().Points()) err = r.Write(testutil.MockMetrics())
require.NoError(t, err) require.NoError(t, err)
} }


@ -11,15 +11,15 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
// Point defines a single point measurement // Metric defines a single point measurement
type Point struct { type Metric struct {
Measurement string Measurement string
Tags map[string]string Tags map[string]string
Fields map[string]interface{} Fields map[string]interface{}
Time time.Time Time time.Time
} }
func (p *Point) String() string { func (p *Metric) String() string {
return fmt.Sprintf("%s %v", p.Measurement, p.Fields) return fmt.Sprintf("%s %v", p.Measurement, p.Fields)
} }
@ -27,7 +27,7 @@ func (p *Point) String() string {
type Accumulator struct { type Accumulator struct {
sync.Mutex sync.Mutex
Points []*Point Metrics []*Metric
debug bool debug bool
} }
@ -74,14 +74,14 @@ func (a *Accumulator) AddFields(
fmt.Print(msg) fmt.Print(msg)
} }
p := &Point{ p := &Metric{
Measurement: measurement, Measurement: measurement,
Fields: fields, Fields: fields,
Tags: tags, Tags: tags,
Time: t, Time: t,
} }
a.Points = append(a.Points, p) a.Metrics = append(a.Metrics, p)
} }
func (a *Accumulator) Debug() bool { func (a *Accumulator) Debug() bool {
@ -95,8 +95,8 @@ func (a *Accumulator) SetDebug(debug bool) {
} }
// Get gets the specified measurement point from the accumulator // Get gets the specified measurement point from the accumulator
func (a *Accumulator) Get(measurement string) (*Point, bool) { func (a *Accumulator) Get(measurement string) (*Metric, bool) {
for _, p := range a.Points { for _, p := range a.Metrics {
if p.Measurement == measurement { if p.Measurement == measurement {
return p, true return p, true
} }
@ -109,7 +109,7 @@ func (a *Accumulator) Get(measurement string) (*Point, bool) {
// measurements // measurements
func (a *Accumulator) NFields() int { func (a *Accumulator) NFields() int {
counter := 0 counter := 0
for _, pt := range a.Points { for _, pt := range a.Metrics {
for _, _ = range pt.Fields { for _, _ = range pt.Fields {
counter++ counter++
} }
@ -123,7 +123,7 @@ func (a *Accumulator) AssertContainsTaggedFields(
fields map[string]interface{}, fields map[string]interface{},
tags map[string]string, tags map[string]string,
) { ) {
for _, p := range a.Points { for _, p := range a.Metrics {
if !reflect.DeepEqual(tags, p.Tags) { if !reflect.DeepEqual(tags, p.Tags) {
continue continue
} }
@ -148,7 +148,7 @@ func (a *Accumulator) AssertContainsFields(
measurement string, measurement string,
fields map[string]interface{}, fields map[string]interface{},
) { ) {
for _, p := range a.Points { for _, p := range a.Metrics {
if p.Measurement == measurement { if p.Measurement == measurement {
if !reflect.DeepEqual(fields, p.Fields) { if !reflect.DeepEqual(fields, p.Fields) {
pActual, _ := json.MarshalIndent(p.Fields, "", " ") pActual, _ := json.MarshalIndent(p.Fields, "", " ")
@ -166,7 +166,7 @@ func (a *Accumulator) AssertContainsFields(
// HasIntValue returns true if the measurement has an Int value // HasIntValue returns true if the measurement has an Int value
func (a *Accumulator) HasIntField(measurement string, field string) bool { func (a *Accumulator) HasIntField(measurement string, field string) bool {
for _, p := range a.Points { for _, p := range a.Metrics {
if p.Measurement == measurement { if p.Measurement == measurement {
for fieldname, value := range p.Fields { for fieldname, value := range p.Fields {
if fieldname == field { if fieldname == field {
@ -182,7 +182,7 @@ func (a *Accumulator) HasIntField(measurement string, field string) bool {
// HasUIntValue returns true if the measurement has a UInt value // HasUIntValue returns true if the measurement has a UInt value
func (a *Accumulator) HasUIntField(measurement string, field string) bool { func (a *Accumulator) HasUIntField(measurement string, field string) bool {
for _, p := range a.Points { for _, p := range a.Metrics {
if p.Measurement == measurement { if p.Measurement == measurement {
for fieldname, value := range p.Fields { for fieldname, value := range p.Fields {
if fieldname == field { if fieldname == field {
@ -198,7 +198,7 @@ func (a *Accumulator) HasUIntField(measurement string, field string) bool {
// HasFloatValue returns true if the given measurement has a float value // HasFloatValue returns true if the given measurement has a float value
func (a *Accumulator) HasFloatField(measurement string, field string) bool { func (a *Accumulator) HasFloatField(measurement string, field string) bool {
for _, p := range a.Points { for _, p := range a.Metrics {
if p.Measurement == measurement { if p.Measurement == measurement {
for fieldname, value := range p.Fields { for fieldname, value := range p.Fields {
if fieldname == field { if fieldname == field {
@ -215,7 +215,7 @@ func (a *Accumulator) HasFloatField(measurement string, field string) bool {
// HasMeasurement returns true if the accumulator has a measurement with the // HasMeasurement returns true if the accumulator has a measurement with the
// given name // given name
func (a *Accumulator) HasMeasurement(measurement string) bool { func (a *Accumulator) HasMeasurement(measurement string) bool {
for _, p := range a.Points { for _, p := range a.Metrics {
if p.Measurement == measurement { if p.Measurement == measurement {
return true return true
} }
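
With the mock Accumulator now storing Metric structs in acc.Metrics instead of acc.Points, an input plugin test reads gathered data through the helpers shown above. A hypothetical test sketch, assuming AddFields keeps the (measurement, fields, tags, optional time) shape telegraf accumulators use; the "mem" measurement and field names are placeholders, not part of this commit.

```go
// accumulator_sketch_test.go (hypothetical file)
package sketch

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

// TestGatherSketch shows how a plugin test reads the renamed acc.Metrics
// slice and the Has*Field helpers after this change. A real test would call
// the plugin's Gather(&acc) instead of AddFields directly.
func TestGatherSketch(t *testing.T) {
	var acc testutil.Accumulator

	acc.AddFields("mem",
		map[string]interface{}{"free": 42.0},
		map[string]string{"host": "server01"},
	)

	if !acc.HasFloatField("mem", "free") {
		t.Error("expected mem.free to be recorded as a float field")
	}
	for _, m := range acc.Metrics {
		t.Logf("gathered %s with %d fields", m.Measurement, len(m.Fields))
	}
}
```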

View File

@ -6,7 +6,7 @@ import (
"os" "os"
"time" "time"
"github.com/influxdata/influxdb/client/v2" "github.com/influxdata/telegraf"
) )
var localhost = "localhost" var localhost = "localhost"
@ -31,21 +31,21 @@ func GetLocalHost() string {
return localhost return localhost
} }
// MockBatchPoints returns a mock BatchPoints object for using in unit tests // MockMetrics returns a mock []telegraf.Metric object for using in unit tests
// of telegraf output sinks. // of telegraf output sinks.
func MockBatchPoints() client.BatchPoints { func MockMetrics() []telegraf.Metric {
metrics := make([]telegraf.Metric, 0)
// Create a new point batch // Create a new point batch
bp, _ := client.NewBatchPoints(client.BatchPointsConfig{}) metrics = append(metrics, TestMetric(1.0))
bp.AddPoint(TestPoint(1.0)) return metrics
return bp
} }
// TestPoint Returns a simple test point: // TestMetric Returns a simple test point:
// measurement -> "test1" or name // measurement -> "test1" or name
// tags -> "tag1":"value1" // tags -> "tag1":"value1"
// value -> value // value -> value
// time -> time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) // time -> time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
func TestPoint(value interface{}, name ...string) *client.Point { func TestMetric(value interface{}, name ...string) telegraf.Metric {
if value == nil { if value == nil {
panic("Cannot use a nil value") panic("Cannot use a nil value")
} }
@ -54,7 +54,7 @@ func TestPoint(value interface{}, name ...string) *client.Point {
measurement = name[0] measurement = name[0]
} }
tags := map[string]string{"tag1": "value1"} tags := map[string]string{"tag1": "value1"}
pt, _ := client.NewPoint( pt, _ := telegraf.NewMetric(
measurement, measurement,
tags, tags,
map[string]interface{}{"value": value}, map[string]interface{}{"value": value},