Adding TTL metrics data

Victor Garcia 2016-04-19 20:55:03 +02:00
parent a585119a67
commit a7ad016c96
5 changed files with 63 additions and 37 deletions

View File

@@ -30,6 +30,7 @@ based on _prefix_ in addition to globs. This means that a filter like
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin
### Bugfixes

View File

@@ -48,6 +48,5 @@ and create a single measurement containing values e.g.
* resident_megabytes
* updates_per_sec
* vsize_megabytes
* ttl_deletes_per_sec
* ttl_passes_per_sec
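
All of these names, including the two new TTL entries, end up as fields of the single `mongodb` measurement. A minimal, self-contained sketch of that shape (field names taken from the list above; the `ttlSample` type and values are purely illustrative):

```go
package main

import "fmt"

// ttlSample stands in for the per-second TTL values computed by the plugin.
type ttlSample struct {
	PassesPerSec  int64 // TTL monitor passes per second
	DeletesPerSec int64 // documents removed by the TTL monitor per second
}

func main() {
	s := ttlSample{PassesPerSec: 1, DeletesPerSec: 42}

	// The measurement is a flat map of field name -> value.
	fields := map[string]interface{}{
		"ttl_passes_per_sec":  s.PassesPerSec,
		"ttl_deletes_per_sec": s.DeletesPerSec,
	}
	fmt.Println("mongodb", fields)
}
```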

View File

@@ -26,22 +26,24 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
}
var DefaultStats = map[string]string{
"inserts_per_sec": "Insert",
"queries_per_sec": "Query",
"updates_per_sec": "Update",
"deletes_per_sec": "Delete",
"getmores_per_sec": "GetMore",
"commands_per_sec": "Command",
"flushes_per_sec": "Flushes",
"vsize_megabytes": "Virtual",
"resident_megabytes": "Resident",
"queued_reads": "QueuedReaders",
"queued_writes": "QueuedWriters",
"active_reads": "ActiveReaders",
"active_writes": "ActiveWriters",
"net_in_bytes": "NetIn",
"net_out_bytes": "NetOut",
"open_connections": "NumConnections",
"ttl_deletes_per_sec": "DeletedDocuments",
"ttl_passes_per_sec": "Passes",
}
var DefaultReplStats = map[string]string{
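
The `DefaultStats` map above pairs output field names with the names of the corresponding `StatLine` struct fields. A minimal, hypothetical sketch of how such a name-to-field table can be consumed with reflection (this illustrates the pattern only, not necessarily the plugin's exact helper):

```go
package main

import (
	"fmt"
	"reflect"
)

// statLine mirrors a few StatLine fields for the sake of the example.
type statLine struct {
	Insert           int64
	Passes           int64
	DeletedDocuments int64
}

// defaultStats maps output field names to statLine field names,
// in the same spirit as DefaultStats above.
var defaultStats = map[string]string{
	"inserts_per_sec":     "Insert",
	"ttl_passes_per_sec":  "Passes",
	"ttl_deletes_per_sec": "DeletedDocuments",
}

// buildFields resolves each struct field by name and collects its value.
func buildFields(s statLine) map[string]interface{} {
	v := reflect.ValueOf(s)
	fields := make(map[string]interface{}, len(defaultStats))
	for name, field := range defaultStats {
		fields[name] = v.FieldByName(field).Interface()
	}
	return fields
}

func main() {
	fmt.Println(buildFields(statLine{Insert: 10, Passes: 1, DeletedDocuments: 5}))
}
```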

View File

@@ -13,24 +13,26 @@ var tags = make(map[string]string)
func TestAddNonReplStats(t *testing.T) {
d := NewMongodbData(
&StatLine{
StorageEngine: "",
Time: time.Now(),
Insert: 0,
Query: 0,
Update: 0,
Delete: 0,
GetMore: 0,
Command: 0,
Flushes: 0,
Virtual: 0,
Resident: 0,
QueuedReaders: 0,
QueuedWriters: 0,
ActiveReaders: 0,
ActiveWriters: 0,
NetIn: 0,
NetOut: 0,
NumConnections: 0,
Passes: 0,
DeletedDocuments: 0,
},
tags,
)
@@ -128,6 +130,8 @@ func TestStateTag(t *testing.T) {
"resident_megabytes": int64(0),
"updates_per_sec": int64(0),
"vsize_megabytes": int64(0),
"ttl_deletes_per_sec": int64(0),
"ttl_passes_per_sec": int64(0),
}
acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}

View File

@@ -54,6 +54,7 @@ type ServerStatus struct {
ShardCursorType map[string]interface{} `bson:"shardCursorType"`
StorageEngine map[string]string `bson:"storageEngine"`
WiredTiger *WiredTiger `bson:"wiredTiger"`
Metrics *MetricsStats `bson:"metrics"`
}
// WiredTiger stores information related to the WiredTiger storage engine.
@@ -194,6 +195,17 @@ type OpcountStats struct {
Command int64 `bson:"command"`
}
// MetricsStats stores information related to metrics
type MetricsStats struct {
TTL *TTLStats `bson:"ttl"`
}
// TTLStats stores information related to documents with a ttl index.
type TTLStats struct {
DeletedDocuments int64 `bson:"deletedDocuments"`
Passes int64 `bson:"passes"`
}
// ReadWriteLockTimes stores time spent holding read/write locks.
type ReadWriteLockTimes struct {
Read int64 `bson:"R"`
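
The new `MetricsStats` and `TTLStats` structs decode the `metrics.ttl` sub-document of MongoDB's `serverStatus` output, where `metrics.ttl.passes` and `metrics.ttl.deletedDocuments` are raw, monotonically increasing counters. A standalone sketch of reading just that sub-document with the mgo driver Telegraf used at the time (address and error handling are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// ttlOnlyStatus mirrors only the TTL portion of the new ServerStatus fields.
type ttlOnlyStatus struct {
	Metrics *struct {
		TTL *struct {
			DeletedDocuments int64 `bson:"deletedDocuments"`
			Passes           int64 `bson:"passes"`
		} `bson:"ttl"`
	} `bson:"metrics"`
}

func main() {
	session, err := mgo.Dial("127.0.0.1:27017") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	var status ttlOnlyStatus
	if err := session.DB("admin").Run(bson.M{"serverStatus": 1}, &status); err != nil {
		log.Fatal(err)
	}
	// Servers that do not report metrics.ttl leave these pointers nil.
	if status.Metrics != nil && status.Metrics.TTL != nil {
		fmt.Println("ttl passes:", status.Metrics.TTL.Passes)
		fmt.Println("ttl deleted documents:", status.Metrics.TTL.DeletedDocuments)
	}
}
```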
@@ -332,6 +344,9 @@ type StatLine struct {
// Opcounter fields
Insert, Query, Update, Delete, GetMore, Command int64
// TTL fields
Passes, DeletedDocuments int64
// Collection locks (3.0 mmap only)
CollectionLocks *CollectionLockStatus
@@ -423,6 +438,11 @@ func NewStatLine(oldStat, newStat ServerStatus, key string, all bool, sampleSecs
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
}
if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
}
if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil {
returnVal.InsertR = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs)
returnVal.QueryR = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs)
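
Like the opcounter fields, the TTL counters are turned into per-second rates by diffing the two `serverStatus` samples. The `diff` helper itself is outside this hunk; a minimal sketch of the idea, assuming `sampleSecs` is the number of seconds between samples and that a counter reset (e.g. after a server restart) should not produce a negative rate:

```go
package main

import "fmt"

// diffPerSec is an illustrative stand-in for the plugin's diff helper: it
// returns the per-second rate between two counter samples, treating a counter
// that went backwards (e.g. after a server restart) as starting from zero.
func diffPerSec(newVal, oldVal, sampleSecs int64) int64 {
	d := newVal - oldVal
	if d < 0 {
		d = newVal
	}
	if sampleSecs <= 0 {
		sampleSecs = 1
	}
	return d / sampleSecs
}

func main() {
	// 1200 deleted documents now vs. 1000 ten seconds ago -> 20 deletions/sec.
	fmt.Println(diffPerSec(1200, 1000, 10))
}
```

One design note: the new guard checks `Metrics.TTL` on both samples but assumes `Metrics` itself is non-nil; if a server ever omits the `metrics` section entirely, an additional nil check on `Metrics` would be needed.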