Fixed needless type casting; create the fields map in a single step

This commit is contained in:
Maksadbek 2016-03-25 04:06:36 +05:00
parent 67d1e7a745
commit cf6666b6f0
1 changed file with 39 additions and 38 deletions

View File

@ -733,7 +733,6 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat
servtag = "localhost" servtag = "localhost"
} }
tags := map[string]string{"server": servtag} tags := map[string]string{"server": servtag}
fields := make(map[string]interface{})
var ( var (
size uint64 = 0 size uint64 = 0
count uint64 = 0 count uint64 = 0
@ -749,8 +748,10 @@ func (m *Mysql) gatherBinaryLogs(db *sql.DB, serv string, acc telegraf.Accumulat
size += fileSize size += fileSize
count++ count++
} }
fields["binary_size_bytes"] = size fields := map[string]interface{}{
fields["binary_files_count"] = count "binary_size_bytes": size,
"binary_files_count": count,
}
acc.AddFields("mysql", fields, tags) acc.AddFields("mysql", fields, tags)
return nil return nil
} }
@ -839,12 +840,11 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum
} }
fields["syncs"] = i fields["syncs"] = i
} }
// Send any remaining fields
if len(fields) > 0 {
acc.AddFields("mysql", fields, tags)
}
} }
// Send any remaining fields
if len(fields) > 0 {
acc.AddFields("mysql", fields, tags)
}
// gather connection metrics from processlist for each user // gather connection metrics from processlist for each user
if m.GatherProcessList { if m.GatherProcessList {
conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user") conn_rows, err := db.Query("SELECT user, sum(1) FROM INFORMATION_SCHEMA.PROCESSLIST GROUP BY user")
@ -931,8 +931,8 @@ func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Acc
defer rows.Close() defer rows.Close()
var ( var (
objSchema, objName, servtag string objSchema, objName, servtag string
countFetch, countInsert, countUpdate, countDelete uint64 countFetch, countInsert, countUpdate, countDelete float64
timeFetch, timeInsert, timeUpdate, timeDelete uint64 timeFetch, timeInsert, timeUpdate, timeDelete float64
) )
servtag, err = parseDSN(serv) servtag, err = parseDSN(serv)
@ -955,16 +955,17 @@ func (m *Mysql) gatherPerfTableIOWaits(db *sql.DB, serv string, acc telegraf.Acc
"schema": objSchema, "schema": objSchema,
"name": objName, "name": objName,
} }
fields := make(map[string]interface{})
fields["table_io_waits_total_fetch"] = float64(countFetch)
fields["table_io_waits_total_insert"] = float64(countInsert)
fields["table_io_waits_total_update"] = float64(countUpdate)
fields["table_io_waits_total_delete"] = float64(countDelete)
fields["table_io_waits_seconds_total_fetch"] = float64(timeFetch) / picoSeconds fields := map[string]interface{}{
fields["table_io_waits_seconds_total_insert"] = float64(timeInsert) / picoSeconds "table_io_waits_total_fetch": countFetch,
fields["table_io_waits_seconds_total_update"] = float64(timeUpdate) / picoSeconds "table_io_waits_total_insert": countInsert,
fields["table_io_waits_seconds_total_delete"] = float64(timeDelete) / picoSeconds "table_io_waits_total_update": countUpdate,
"table_io_waits_total_delete": countDelete,
"table_io_waits_seconds_total_fetch": timeFetch / picoSeconds,
"table_io_waits_seconds_total_insert": timeInsert / picoSeconds,
"table_io_waits_seconds_total_update": timeUpdate / picoSeconds,
"table_io_waits_seconds_total_delete": timeDelete / picoSeconds,
}
acc.AddFields("mysql_perf_schema", fields, tags) acc.AddFields("mysql_perf_schema", fields, tags)
} }
@ -1364,11 +1365,11 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
var ( var (
schemaName, digest, digest_text string schemaName, digest, digest_text string
count, queryTime, errors, warnings uint64 count, queryTime, errors, warnings float64
rowsAffected, rowsSent, rowsExamined uint64 rowsAffected, rowsSent, rowsExamined float64
tmpTables, tmpDiskTables uint64 tmpTables, tmpDiskTables float64
sortMergePasses, sortRows uint64 sortMergePasses, sortRows float64
noIndexUsed uint64 noIndexUsed float64
) )
servtag, err := parseDSN(serv) servtag, err := parseDSN(serv)
@ -1395,20 +1396,20 @@ func (m *Mysql) gatherPerfEventsStatements(db *sql.DB, serv string, acc telegraf
tags["digest"] = digest tags["digest"] = digest
tags["digest_text"] = digest_text tags["digest_text"] = digest_text
fields := make(map[string]interface{}) fields := map[string]interface{}{
"events_statements_total": count,
fields["events_statements_total"] = float64(count) "events_statements_seconds_total": queryTime / picoSeconds,
fields["events_statements_seconds_total"] = float64(queryTime) / picoSeconds "events_statements_errors_total": errors,
fields["events_statements_errors_total"] = float64(errors) "events_statements_warnings_total": warnings,
fields["events_statements_warnings_total"] = float64(warnings) "events_statements_rows_affected_total": rowsAffected,
fields["events_statements_rows_affected_total"] = float64(rowsAffected) "events_statements_rows_sent_total": rowsSent,
fields["events_statements_rows_sent_total"] = float64(rowsSent) "events_statements_rows_examined_total": rowsExamined,
fields["events_statements_rows_examined_total"] = float64(rowsExamined) "events_statements_tmp_tables_total": tmpTables,
fields["events_statements_tmp_tables_total"] = float64(tmpTables) "events_statements_tmp_disk_tables_total": tmpDiskTables,
fields["events_statements_tmp_disk_tables_total"] = float64(tmpDiskTables) "events_statements_sort_merge_passes_total": sortMergePasses,
fields["events_statements_sort_merge_passes_total"] = float64(sortMergePasses) "events_statements_sort_rows_total": sortRows,
fields["events_statements_sort_rows_total"] = float64(sortRows) "events_statements_no_index_used_total": noIndexUsed,
fields["events_statements_no_index_used_total"] = float64(noIndexUsed) }
acc.AddFields("mysql_perf_schema", fields, tags) acc.AddFields("mysql_perf_schema", fields, tags)
} }