Adding TTL metrics data

closes #1060
Victor Garcia 2016-04-19 20:55:03 +02:00 committed by Cameron Sparr
parent 46543d6323
commit 46aaaa9b70
5 changed files with 63 additions and 37 deletions

View File

@@ -30,6 +30,7 @@ based on _prefix_ in addition to globs. This means that a filter like
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin
### Bugfixes

View File

@@ -48,6 +48,5 @@ and create a single measurement containing values e.g.
* resident_megabytes
* updates_per_sec
* vsize_megabytes
* ttl_deletes_per_sec
* ttl_passes_per_sec

View File

@@ -42,6 +42,8 @@ var DefaultStats = map[string]string{
"net_in_bytes": "NetIn",
"net_out_bytes": "NetOut",
"open_connections": "NumConnections",
"ttl_deletes_per_sec": "DeletedDocuments",
"ttl_passes_per_sec": "Passes",
}
var DefaultReplStats = map[string]string{
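Note on the hunk above: DefaultStats pairs an output field name with the name of a StatLine struct field. Below is a minimal, self-contained sketch of how such a map can be flattened into Telegraf fields via reflection; it is an illustration under that assumption, not the plugin's exact code, and the values are made up.

package main

import (
	"fmt"
	"reflect"
)

// StatLine here carries only the two fields needed for the sketch; the real
// struct in mongostat.go has many more.
type StatLine struct {
	Passes, DeletedDocuments int64
}

var DefaultStats = map[string]string{
	"ttl_deletes_per_sec": "DeletedDocuments",
	"ttl_passes_per_sec":  "Passes",
}

func main() {
	line := &StatLine{Passes: 2, DeletedDocuments: 10}
	fields := make(map[string]interface{})
	v := reflect.ValueOf(line).Elem()
	for fieldName, statName := range DefaultStats {
		// Look up the StatLine struct field named by the map value.
		fields[fieldName] = v.FieldByName(statName).Interface()
	}
	fmt.Println(fields) // e.g. map[ttl_deletes_per_sec:10 ttl_passes_per_sec:2]
}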

View File

@@ -31,6 +31,8 @@ func TestAddNonReplStats(t *testing.T) {
NetIn: 0,
NetOut: 0,
NumConnections: 0,
Passes: 0,
DeletedDocuments: 0,
},
tags,
)
@@ -128,6 +130,8 @@ func TestStateTag(t *testing.T) {
"resident_megabytes": int64(0),
"updates_per_sec": int64(0),
"vsize_megabytes": int64(0),
"ttl_deletes_per_sec": int64(0),
"ttl_passes_per_sec": int64(0),
}
acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}

View File

@@ -54,6 +54,7 @@ type ServerStatus struct {
ShardCursorType map[string]interface{} `bson:"shardCursorType"`
StorageEngine map[string]string `bson:"storageEngine"`
WiredTiger *WiredTiger `bson:"wiredTiger"`
Metrics *MetricsStats `bson:"metrics"`
}
// WiredTiger stores information related to the WiredTiger storage engine.
@@ -194,6 +195,17 @@ type OpcountStats struct {
Command int64 `bson:"command"`
}
// MetricsStats stores information related to metrics
type MetricsStats struct {
TTL *TTLStats `bson:"ttl"`
}
// TTLStats stores information related to documents with a ttl index.
type TTLStats struct {
DeletedDocuments int64 `bson:"deletedDocuments"`
Passes int64 `bson:"passes"`
}
// ReadWriteLockTimes stores time spent holding read/write locks.
type ReadWriteLockTimes struct {
Read int64 `bson:"R"`
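Note on the hunk above: the bson tags on TTLStats follow the layout of db.serverStatus().metrics.ttl, where deletedDocuments and passes are cumulative counters. The sketch below decodes a hand-built document of that shape into trimmed copies of the new structs; it assumes the gopkg.in/mgo.v2/bson package and is illustrative only.

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

// Trimmed copies of the new types for this example.
type TTLStats struct {
	DeletedDocuments int64 `bson:"deletedDocuments"`
	Passes           int64 `bson:"passes"`
}

type MetricsStats struct {
	TTL *TTLStats `bson:"ttl"`
}

func main() {
	// Stand-in for the "metrics" sub-document returned by serverStatus.
	raw, err := bson.Marshal(bson.M{
		"ttl": bson.M{"deletedDocuments": int64(10), "passes": int64(2)},
	})
	if err != nil {
		panic(err)
	}

	var m MetricsStats
	if err := bson.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.TTL.DeletedDocuments, m.TTL.Passes) // 10 2
}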
@@ -332,6 +344,9 @@ type StatLine struct {
// Opcounter fields
Insert, Query, Update, Delete, GetMore, Command int64
// TTL fields
Passes, DeletedDocuments int64
// Collection locks (3.0 mmap only)
CollectionLocks *CollectionLockStatus
@@ -423,6 +438,11 @@ func NewStatLine(oldStat, newStat ServerStatus, key string, all bool, sampleSecs
returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
}
if newStat.Metrics != nil && oldStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
}
if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil {
returnVal.InsertR = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs)
returnVal.QueryR = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs)
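Note on the hunk above: NewStatLine converts the cumulative TTL counters into per-second rates with the diff helper. The sketch below is a hypothetical reimplementation of that helper with a worked example; the guard against counter resets is part of this sketch and not necessarily present in the original.

package main

import "fmt"

// diff turns two samples of a cumulative counter into a per-second rate over
// a window of sampleSecs seconds (hypothetical reimplementation).
func diff(newVal, oldVal, sampleSecs int64) int64 {
	d := newVal - oldVal
	if d < 0 || sampleSecs <= 0 {
		// Counter reset (e.g. mongod restart) or an unusable sample window.
		return 0
	}
	return d / sampleSecs
}

func main() {
	// If deletedDocuments grew from 100 to 160 across a 10 second sample,
	// ttl_deletes_per_sec is reported as 6.
	fmt.Println(diff(160, 100, 10)) // 6
}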