Adding TTL metrics data
parent a585119a67
commit a7ad016c96
@@ -30,6 +30,7 @@ based on _prefix_ in addition to globs. This means that a filter like
- [#1017](https://github.com/influxdata/telegraf/pull/1017): taginclude and tagexclude arguments.
- [#1015](https://github.com/influxdata/telegraf/pull/1015): Docker plugin schema refactor.
- [#889](https://github.com/influxdata/telegraf/pull/889): Improved MySQL plugin. Thanks @maksadbek!
- [#1060](https://github.com/influxdata/telegraf/pull/1060): TTL metrics added to MongoDB input plugin

### Bugfixes
@@ -48,6 +48,5 @@ and create a single measurement containing values e.g.
* resident_megabytes
* updates_per_sec
* vsize_megabytes
* ttl_deletes_per_sec
* ttl_passes_per_sec
@@ -26,22 +26,24 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData {
}

var DefaultStats = map[string]string{
    "inserts_per_sec":     "Insert",
    "queries_per_sec":     "Query",
    "updates_per_sec":     "Update",
    "deletes_per_sec":     "Delete",
    "getmores_per_sec":    "GetMore",
    "commands_per_sec":    "Command",
    "flushes_per_sec":     "Flushes",
    "vsize_megabytes":     "Virtual",
    "resident_megabytes":  "Resident",
    "queued_reads":        "QueuedReaders",
    "queued_writes":       "QueuedWriters",
    "active_reads":        "ActiveReaders",
    "active_writes":       "ActiveWriters",
    "net_in_bytes":        "NetIn",
    "net_out_bytes":       "NetOut",
    "open_connections":    "NumConnections",
    "ttl_deletes_per_sec": "DeletedDocuments",
    "ttl_passes_per_sec":  "Passes",
}

var DefaultReplStats = map[string]string{
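DefaultStats maps Telegraf field names to StatLine struct-field names, so the two new entries wire ttl_deletes_per_sec and ttl_passes_per_sec to the DeletedDocuments and Passes counters. The flattening step itself is not part of this hunk; the sketch below shows how such a name-to-field map can be applied with reflection. flattenStats is a hypothetical helper, not the plugin's actual method.

// Sketch, not the plugin's actual code: flatten a StatLine into Telegraf
// fields using a name->struct-field map such as DefaultStats.
// Assumes the reflect package is imported and that every map value names
// an exported StatLine field.
func flattenStats(statLine *StatLine, stats map[string]string) map[string]interface{} {
    fields := make(map[string]interface{})
    v := reflect.ValueOf(statLine).Elem()
    for fieldName, structFieldName := range stats {
        // e.g. "ttl_passes_per_sec" -> statLine.Passes
        fields[fieldName] = v.FieldByName(structFieldName).Interface()
    }
    return fields
}

Because the output fields are driven entirely by this map, adding the two entries is enough for the measurement to gain ttl_deletes_per_sec and ttl_passes_per_sec without further plumbing.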
@@ -13,24 +13,26 @@ var tags = make(map[string]string)
func TestAddNonReplStats(t *testing.T) {
    d := NewMongodbData(
        &StatLine{
            StorageEngine:    "",
            Time:             time.Now(),
            Insert:           0,
            Query:            0,
            Update:           0,
            Delete:           0,
            GetMore:          0,
            Command:          0,
            Flushes:          0,
            Virtual:          0,
            Resident:         0,
            QueuedReaders:    0,
            QueuedWriters:    0,
            ActiveReaders:    0,
            ActiveWriters:    0,
            NetIn:            0,
            NetOut:           0,
            NumConnections:   0,
            Passes:           0,
            DeletedDocuments: 0,
        },
        tags,
    )
@@ -128,6 +130,8 @@ func TestStateTag(t *testing.T) {
        "resident_megabytes":  int64(0),
        "updates_per_sec":     int64(0),
        "vsize_megabytes":     int64(0),
        "ttl_deletes_per_sec": int64(0),
        "ttl_passes_per_sec":  int64(0),
    }
    acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags)
}
@@ -54,6 +54,7 @@ type ServerStatus struct {
    ShardCursorType map[string]interface{} `bson:"shardCursorType"`
    StorageEngine   map[string]string      `bson:"storageEngine"`
    WiredTiger      *WiredTiger            `bson:"wiredTiger"`
    Metrics         *MetricsStats          `bson:"metrics"`
}

// WiredTiger stores information related to the WiredTiger storage engine.
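The new Metrics field decodes the `metrics` document returned by MongoDB's serverStatus command. How ServerStatus gets populated is not part of this hunk; the sketch below shows the usual pattern with the mgo driver the MongoDB input used at the time. fetchServerStatus is a hypothetical helper, and the session handling is an assumption.

// Sketch, not the plugin's exact code: populate ServerStatus via serverStatus.
// Assumes the mgo driver (gopkg.in/mgo.v2) and its bson package.
import (
    mgo "gopkg.in/mgo.v2"
    "gopkg.in/mgo.v2/bson"
)

func fetchServerStatus(session *mgo.Session) (*ServerStatus, error) {
    status := &ServerStatus{}
    // serverStatus includes the "metrics" document, so the new
    // Metrics/MetricsStats field picks up metrics.ttl automatically.
    if err := session.DB("admin").Run(bson.D{{"serverStatus", 1}}, status); err != nil {
        return nil, err
    }
    return status, nil
}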
@@ -194,6 +195,17 @@ type OpcountStats struct {
    Command int64 `bson:"command"`
}

// MetricsStats stores information related to metrics
type MetricsStats struct {
    TTL *TTLStats `bson:"ttl"`
}

// TTLStats stores information related to documents with a ttl index.
type TTLStats struct {
    DeletedDocuments int64 `bson:"deletedDocuments"`
    Passes           int64 `bson:"passes"`
}

// ReadWriteLockTimes stores time spent holding read/write locks.
type ReadWriteLockTimes struct {
    Read int64 `bson:"R"`
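The bson tags tie the new structs to the serverStatus metrics.ttl subdocument (its passes and deletedDocuments counters). A small sketch of that mapping, using mgo's bson package with an inline document standing in for real server output; decodeTTLExample and the literal counter values are invented for illustration.

// Sketch, not part of the commit: decode a fragment shaped like
// serverStatus().metrics through the bson tags above.
func decodeTTLExample() (*MetricsStats, error) {
    raw, err := bson.Marshal(bson.M{
        "ttl": bson.M{"passes": int64(7), "deletedDocuments": int64(42)},
    })
    if err != nil {
        return nil, err
    }
    m := &MetricsStats{}
    if err := bson.Unmarshal(raw, m); err != nil {
        return nil, err
    }
    // m.TTL.Passes == 7, m.TTL.DeletedDocuments == 42
    return m, nil
}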
@@ -332,6 +344,9 @@ type StatLine struct {
    // Opcounter fields
    Insert, Query, Update, Delete, GetMore, Command int64

    // TTL fields
    Passes, DeletedDocuments int64

    // Collection locks (3.0 mmap only)
    CollectionLocks *CollectionLockStatus
@@ -423,6 +438,11 @@ func NewStatLine(oldStat, newStat ServerStatus, key string, all bool, sampleSecs
        returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs)
    }

    if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil {
        returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
        returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
    }

    if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil {
        returnVal.InsertR = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs)
        returnVal.QueryR = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs)
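Passes and DeletedDocuments are cumulative counters in serverStatus, so NewStatLine converts them into per-second rates with the same diff helper used for the opcounters. The helper itself is not in this hunk; below is a sketch of that rate computation, plus a defensive variant of the TTL block that also nil-checks Metrics, since ServerStatus.Metrics is a pointer and may be nil when a server omits the metrics document. Both function names (rate, ttlRates) are hypothetical, not the file's actual code.

// Sketch only: a per-second rate from two cumulative samples taken
// sampleSecs apart (assumed to mirror what the diff helper does).
func rate(newVal, oldVal, sampleSecs int64) int64 {
    if sampleSecs <= 0 {
        return 0
    }
    return (newVal - oldVal) / sampleSecs
}

// Defensive variant of the TTL block above: guard the Metrics pointer as
// well as the TTL pointer before dereferencing either sample.
func ttlRates(oldStat, newStat *ServerStatus, sampleSecs int64) (passes, deleted int64) {
    if oldStat.Metrics == nil || newStat.Metrics == nil ||
        oldStat.Metrics.TTL == nil || newStat.Metrics.TTL == nil {
        return 0, 0
    }
    passes = rate(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs)
    deleted = rate(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs)
    return passes, deleted
}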