Compare commits

..

1 Commits

Author SHA1 Message Date
Jack Zampolin
67318cb032 add working USGS earthquake plugin 2016-12-07 15:57:18 -08:00
11 changed files with 315 additions and 357 deletions

View File

@@ -13,7 +13,6 @@
- [#1348](https://github.com/influxdata/telegraf/issues/1348): Telegraf "internal" plugin for collecting stats on itself.
- [#2127](https://github.com/influxdata/telegraf/pull/2127): Update Go version to 1.7.4.
- [#2126](https://github.com/influxdata/telegraf/pull/2126): Support a metric.Split function.
- [#2065](https://github.com/influxdata/telegraf/pull/2065): elasticsearch "shield" (basic auth) support doc.
### Bugfixes
@@ -21,13 +20,6 @@
- [#1949](https://github.com/influxdata/telegraf/issues/1949): Fix windows `net` plugin.
- [#1775](https://github.com/influxdata/telegraf/issues/1775): Cache & expire metrics for delivery to prometheus
## v1.1.2 [2016-12-12]
### Bugfixes
- [#2007](https://github.com/influxdata/telegraf/issues/2007): Make snmptranslate not required when using numeric OID.
- [#2104](https://github.com/influxdata/telegraf/issues/2104): Add a global snmp translation cache.
## v1.1.1 [2016-11-14]
### Bugfixes

View File

@@ -5,8 +5,8 @@ machine:
- sudo service zookeeper stop
- go version
- go version | grep 1.7.4 || sudo rm -rf /usr/local/go
- wget https://storage.googleapis.com/golang/go1.8beta1.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.8beta1.linux-amd64.tar.gz
- wget https://storage.googleapis.com/golang/go1.7.4.linux-amd64.tar.gz
- sudo tar -C /usr/local -xzf go1.7.4.linux-amd64.tar.gz
- go version
dependencies:

View File

@@ -6,24 +6,18 @@ import (
"log"
"os"
"os/signal"
"path"
"path/filepath"
"plugin"
"runtime"
"strings"
"syscall"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/agent"
"github.com/influxdata/telegraf/internal/config"
"github.com/influxdata/telegraf/logger"
"github.com/influxdata/telegraf/plugins/aggregators"
_ "github.com/influxdata/telegraf/plugins/aggregators/all"
"github.com/influxdata/telegraf/plugins/inputs"
_ "github.com/influxdata/telegraf/plugins/inputs/all"
"github.com/influxdata/telegraf/plugins/outputs"
_ "github.com/influxdata/telegraf/plugins/outputs/all"
"github.com/influxdata/telegraf/plugins/processors"
_ "github.com/influxdata/telegraf/plugins/processors/all"
"github.com/kardianos/service"
)
@@ -56,8 +50,6 @@ var fUsage = flag.String("usage", "",
"print usage for a plugin, ie, 'telegraf -usage mysql'")
var fService = flag.String("service", "",
"operate on the service")
var fPlugins = flag.String("plugins", "",
"path to directory containing external plugins")
// Telegraf version, populated linker.
// ie, -ldflags "-X main.version=`git describe --always --tags`"
@@ -312,93 +304,9 @@ func (p *program) Stop(s service.Service) error {
return nil
}
// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.)
// in the specified directory. The walk aborts and returns on the first error
// encountered (walk error, plugin.Open failure, or registration failure).
func loadExternalPlugins(dir string) error {
	return filepath.Walk(dir, func(pth string, info os.FileInfo, err error) error {
		// Stop if there was an error.
		if err != nil {
			return err
		}
		// Ignore directories.
		if info.IsDir() {
			return nil
		}
		// Ignore files that aren't shared libraries.
		// The extension check is case-insensitive (.SO, .Dll, etc. also match).
		ext := strings.ToLower(path.Ext(pth))
		if ext != ".so" && ext != ".dll" {
			return nil
		}
		// Load plugin. plugin.Open runs the library's init functions as a
		// side effect.
		p, err := plugin.Open(pth)
		if err != nil {
			return err
		}
		// Register plugin under a name derived from its path within dir.
		if err := registerPlugin(dir, pth, p); err != nil {
			return err
		}
		return nil
	})
}
// registerPlugin registers an external plugin with telegraf. The plugin must
// export exactly one of the constructors NewInput, NewOutput, NewProcessor, or
// NewAggregator with the matching func() telegraf.X signature; otherwise an
// error is returned. (The first constructor found, in that order, wins.)
func registerPlugin(pluginsDir, filePath string, p *plugin.Plugin) error {
	// Clean the file path and make sure it's relative to the root plugins directory.
	// This is done because plugin names are namespaced using the directory
	// structure. E.g., if the root plugin directory, passed in the pluginsDir
	// argument, is '/home/jdoe/bin/telegraf/plugins' and we're registering plugin
	// '/home/jdoe/bin/telegraf/plugins/input/mysql.so'
	pluginsDir = filepath.Clean(pluginsDir)
	parentDir, _ := filepath.Split(pluginsDir)
	var err error
	if filePath, err = filepath.Rel(parentDir, filePath); err != nil {
		return err
	}
	// Strip the file extension and save it (only used in messages below).
	ext := path.Ext(filePath)
	filePath = strings.TrimSuffix(filePath, ext)
	// Convert path separators to "." to generate a plugin name namespaced by
	// directory names, e.g. "plugins.input.mysql".
	name := strings.Replace(filePath, string(os.PathSeparator), ".", -1)
	if create, err := p.Lookup("NewInput"); err == nil {
		inputs.Add(name, inputs.Creator(create.(func() telegraf.Input)))
	} else if create, err := p.Lookup("NewOutput"); err == nil {
		outputs.Add(name, outputs.Creator(create.(func() telegraf.Output)))
	} else if create, err := p.Lookup("NewProcessor"); err == nil {
		processors.Add(name, processors.Creator(create.(func() telegraf.Processor)))
	} else if create, err := p.Lookup("NewAggregator"); err == nil {
		aggregators.Add(name, aggregators.Creator(create.(func() telegraf.Aggregator)))
	} else {
		return fmt.Errorf("not a telegraf plugin: %s%s", filePath, ext)
	}
	log.Printf("I! Registered: %s (from %s%s)\n", name, filePath, ext)
	return nil
}
func main() {
flag.Usage = func() { usageExit(0) }
flag.Parse()
// Load external plugins, if requested.
if *fPlugins != "" {
pluginsDir, err := filepath.Abs(*fPlugins)
if err != nil {
log.Fatal("E! " + err.Error())
}
log.Printf("I! Loading external plugins from: %s\n", pluginsDir)
if err := loadExternalPlugins(*fPlugins); err != nil {
log.Fatal("E! " + err.Error())
}
}
if runtime.GOOS == "windows" {
svcConfig := &service.Config{
Name: "telegraf",

View File

@@ -777,8 +777,6 @@
# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
# ## specify a list of one or more Elasticsearch servers
# # you can add username and password to your url to use basic authentication:
# # servers = ["http://user:pass@localhost:9200"]
# servers = ["http://localhost:9200"]
#
# ## Timeout for HTTP requests to the elastic search server(s)

View File

@@ -75,6 +75,7 @@ import (
_ "github.com/influxdata/telegraf/plugins/inputs/trig"
_ "github.com/influxdata/telegraf/plugins/inputs/twemproxy"
_ "github.com/influxdata/telegraf/plugins/inputs/udp_listener"
_ "github.com/influxdata/telegraf/plugins/inputs/usgs"
_ "github.com/influxdata/telegraf/plugins/inputs/varnish"
_ "github.com/influxdata/telegraf/plugins/inputs/webhooks"
_ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters"

View File

@@ -60,8 +60,6 @@ type indexHealth struct {
const sampleConfig = `
## specify a list of one or more Elasticsearch servers
# you can add username and password to your url to use basic authentication:
# servers = ["http://user:pass@localhost:9200"]
servers = ["http://localhost:9200"]
## Timeout for HTTP requests to the elastic search server(s)

View File

@@ -8,7 +8,6 @@ import (
"os/exec"
"strconv"
"strings"
"sync"
"time"
"github.com/influxdata/telegraf"
@@ -200,22 +199,65 @@ func (t *Table) init() error {
return nil
}
// initBuild initializes the table if it has an OID configured. If so, the
// net-snmp tools will be used to look up the OID and auto-populate the table's
// fields.
// init() populates Fields if a table OID is provided.
func (t *Table) initBuild() error {
if t.Oid == "" {
return nil
}
_, _, oidText, fields, err := snmpTable(t.Oid)
mibName, _, oidText, _, err := snmpTranslate(t.Oid)
if err != nil {
return Errorf(err, "initializing table %s", t.Oid)
return Errorf(err, "translating %s", t.Oid)
}
if t.Name == "" {
t.Name = oidText
}
t.Fields = append(t.Fields, fields...)
mibPrefix := mibName + "::"
oidFullName := mibPrefix + oidText
// first attempt to get the table's tags
tagOids := map[string]struct{}{}
// We have to guess that the "entry" oid is `t.Oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info.
if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil {
lines := bytes.Split(out, []byte{'\n'})
for _, line := range lines {
if !bytes.HasPrefix(line, []byte(" INDEX")) {
continue
}
i := bytes.Index(line, []byte("{ "))
if i == -1 { // parse error
continue
}
line = line[i+2:]
i = bytes.Index(line, []byte(" }"))
if i == -1 { // parse error
continue
}
line = line[:i]
for _, col := range bytes.Split(line, []byte(", ")) {
tagOids[mibPrefix+string(col)] = struct{}{}
}
}
}
// this won't actually try to run a query. The `-Ch` will just cause it to dump headers.
out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName)
if err != nil {
return Errorf(err, "getting table columns for %s", t.Oid)
}
cols := bytes.SplitN(out, []byte{'\n'}, 2)[0]
if len(cols) == 0 {
return fmt.Errorf("unable to get columns for table %s", t.Oid)
}
for _, col := range bytes.Split(cols, []byte{' '}) {
if len(col) == 0 {
continue
}
col := string(col)
_, isTag := tagOids[mibPrefix+col]
t.Fields = append(t.Fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag})
}
return nil
}
@@ -799,141 +841,13 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) {
return nil, fmt.Errorf("invalid conversion type '%s'", conv)
}
// snmpTableCache is one memoized result of resolving a table OID with the
// net-snmp tools. The error is cached too, so failing OIDs are not retried.
type snmpTableCache struct {
	mibName string
	oidNum  string
	oidText string
	fields  []Field
	err     error
}

// snmpTableCaches memoizes snmpTable lookups by OID string. It is lazily
// initialized and must only be accessed while holding snmpTableCachesLock.
var snmpTableCaches map[string]snmpTableCache
var snmpTableCachesLock sync.Mutex
// snmpTable resolves the given OID as a table, providing information about the
// table and fields within. Results (including errors) are cached, so the
// external net-snmp tools are invoked at most once per distinct OID.
func snmpTable(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) {
	snmpTableCachesLock.Lock()
	if snmpTableCaches == nil {
		snmpTableCaches = map[string]snmpTableCache{}
	}
	var stc snmpTableCache
	var ok bool
	if stc, ok = snmpTableCaches[oid]; !ok {
		// Cache miss: the (slow) external lookup runs while the lock is held,
		// serializing concurrent callers.
		stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err = snmpTableCall(oid)
		snmpTableCaches[oid] = stc
	}
	snmpTableCachesLock.Unlock()
	return stc.mibName, stc.oidNum, stc.oidText, stc.fields, stc.err
}
// snmpTableCall does the uncached work for snmpTable: it translates the OID,
// scrapes the table's INDEX columns from `snmptranslate -Td` output to decide
// which columns are tags, then reads the column names from `snmptable` header
// output and builds a Field per column.
func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) {
	mibName, oidNum, oidText, _, err = snmpTranslate(oid)
	if err != nil {
		return "", "", "", nil, Errorf(err, "translating")
	}
	mibPrefix := mibName + "::"
	oidFullName := mibPrefix + oidText
	// first attempt to get the table's tags
	tagOids := map[string]struct{}{}
	// We have to guess that the "entry" oid is `oid+".1"`. snmptable and snmptranslate don't seem to have a way to provide the info.
	// Errors here are deliberately ignored: if INDEX info can't be obtained,
	// we simply end up with no tag columns.
	if out, err := execCmd("snmptranslate", "-Td", oidFullName+".1"); err == nil {
		lines := bytes.Split(out, []byte{'\n'})
		for _, line := range lines {
			// Only "  INDEX ... { col1, col2 }" lines are of interest.
			if !bytes.HasPrefix(line, []byte("  INDEX")) {
				continue
			}
			i := bytes.Index(line, []byte("{ "))
			if i == -1 { // parse error
				continue
			}
			line = line[i+2:]
			i = bytes.Index(line, []byte(" }"))
			if i == -1 { // parse error
				continue
			}
			line = line[:i]
			for _, col := range bytes.Split(line, []byte(", ")) {
				tagOids[mibPrefix+string(col)] = struct{}{}
			}
		}
	}
	// this won't actually try to run a query. The `-Ch` will just cause it to dump headers.
	out, err := execCmd("snmptable", "-Ch", "-Cl", "-c", "public", "127.0.0.1", oidFullName)
	if err != nil {
		return "", "", "", nil, Errorf(err, "getting table columns")
	}
	// Only the first output line (the space-separated column headers) is used.
	cols := bytes.SplitN(out, []byte{'\n'}, 2)[0]
	if len(cols) == 0 {
		return "", "", "", nil, fmt.Errorf("could not find any columns in table")
	}
	for _, col := range bytes.Split(cols, []byte{' '}) {
		if len(col) == 0 {
			continue
		}
		col := string(col)
		// A column is a tag iff it appeared in the table's INDEX clause.
		_, isTag := tagOids[mibPrefix+col]
		fields = append(fields, Field{Name: col, Oid: mibPrefix + col, IsTag: isTag})
	}
	return mibName, oidNum, oidText, fields, err
}
// snmpTranslateCache is one memoized result of an snmptranslate lookup.
// The error is cached too, so failing OIDs are not retried.
type snmpTranslateCache struct {
	mibName    string
	oidNum     string
	oidText    string
	conversion string
	err        error
}

// snmpTranslateCaches memoizes snmpTranslate lookups by OID string. It is
// lazily initialized and must only be accessed while holding
// snmpTranslateCachesLock.
var snmpTranslateCachesLock sync.Mutex
var snmpTranslateCaches map[string]snmpTranslateCache
// snmpTranslate resolves the given OID, returning its MIB name, numeric and
// textual forms, and any conversion hint. Results (including errors) are
// cached, so snmptranslate is invoked at most once per distinct OID.
func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
	snmpTranslateCachesLock.Lock()
	if snmpTranslateCaches == nil {
		snmpTranslateCaches = map[string]snmpTranslateCache{}
	}
	var stc snmpTranslateCache
	var ok bool
	if stc, ok = snmpTranslateCaches[oid]; !ok {
		// This will result in only one call to snmptranslate running at a time.
		// We could speed it up by putting a lock in snmpTranslateCache and then
		// returning it immediately, and multiple callers would then release the
		// snmpTranslateCachesLock and instead wait on the individual
		// snmpTranslation.Lock to release. But I don't know that the extra complexity
		// is worth it. Especially when it would slam the system pretty hard if lots
		// of lookups are being performed.
		stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid)
		snmpTranslateCaches[oid] = stc
	}
	snmpTranslateCachesLock.Unlock()
	return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err
}
func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) {
var out []byte
if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") {
out, err = execCmd("snmptranslate", "-Td", "-Ob", oid)
} else {
out, err = execCmd("snmptranslate", "-Td", "-Ob", "-m", "all", oid)
if err, ok := err.(*exec.Error); ok && err.Err == exec.ErrNotFound {
// Silently discard error if snmptranslate not found and we have a numeric OID.
// Meaning we can get by without the lookup.
return "", oid, oid, "", nil
}
}
if err != nil {
return "", "", "", "", err

View File

@@ -4,7 +4,6 @@ package snmp
import (
"fmt"
"net"
"os/exec"
"sync"
"testing"
"time"
@@ -199,56 +198,6 @@ func TestSnmpInit(t *testing.T) {
}, s.Fields[0])
}
// TestSnmpInit_noTranslate verifies graceful degradation when the net-snmp
// tools are not installed: init() must not error, numeric OIDs are kept as-is,
// fields without a Name default to their OID, and IsTag flags are preserved.
func TestSnmpInit_noTranslate(t *testing.T) {
	// override execCommand so it returns exec.ErrNotFound
	defer func(ec func(string, ...string) *exec.Cmd) { execCommand = ec }(execCommand)
	execCommand = func(_ string, _ ...string) *exec.Cmd {
		return exec.Command("snmptranslateExecErrNotFound")
	}
	s := &Snmp{
		Fields: []Field{
			{Oid: ".1.1.1.1", Name: "one", IsTag: true},
			{Oid: ".1.1.1.2", Name: "two"},
			{Oid: ".1.1.1.3"},
		},
		Tables: []Table{
			{Fields: []Field{
				{Oid: ".1.1.1.4", Name: "four", IsTag: true},
				{Oid: ".1.1.1.5", Name: "five"},
				{Oid: ".1.1.1.6"},
			}},
		},
	}
	err := s.init()
	require.NoError(t, err)
	assert.Equal(t, ".1.1.1.1", s.Fields[0].Oid)
	assert.Equal(t, "one", s.Fields[0].Name)
	assert.Equal(t, true, s.Fields[0].IsTag)
	assert.Equal(t, ".1.1.1.2", s.Fields[1].Oid)
	assert.Equal(t, "two", s.Fields[1].Name)
	assert.Equal(t, false, s.Fields[1].IsTag)
	assert.Equal(t, ".1.1.1.3", s.Fields[2].Oid)
	assert.Equal(t, ".1.1.1.3", s.Fields[2].Name)
	assert.Equal(t, false, s.Fields[2].IsTag)
	assert.Equal(t, ".1.1.1.4", s.Tables[0].Fields[0].Oid)
	assert.Equal(t, "four", s.Tables[0].Fields[0].Name)
	assert.Equal(t, true, s.Tables[0].Fields[0].IsTag)
	assert.Equal(t, ".1.1.1.5", s.Tables[0].Fields[1].Oid)
	assert.Equal(t, "five", s.Tables[0].Fields[1].Name)
	assert.Equal(t, false, s.Tables[0].Fields[1].IsTag)
	assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Oid)
	assert.Equal(t, ".1.1.1.6", s.Tables[0].Fields[2].Name)
	assert.Equal(t, false, s.Tables[0].Fields[2].IsTag)
}
func TestGetSNMPConnection_v2(t *testing.T) {
s := &Snmp{
Timeout: internal.Duration{Duration: 3 * time.Second},
@@ -648,71 +597,6 @@ func TestFieldConvert(t *testing.T) {
}
}
// TestSnmpTranslateCache_miss verifies that a lookup of an uncached OID
// populates the cache with exactly the values snmpTranslate returned.
func TestSnmpTranslateCache_miss(t *testing.T) {
	snmpTranslateCaches = nil // start from an empty cache
	oid := "IF-MIB::ifPhysAddress.1"
	mibName, oidNum, oidText, conversion, err := snmpTranslate(oid)
	assert.Len(t, snmpTranslateCaches, 1)
	stc := snmpTranslateCaches[oid]
	require.NotNil(t, stc)
	assert.Equal(t, mibName, stc.mibName)
	assert.Equal(t, oidNum, stc.oidNum)
	assert.Equal(t, oidText, stc.oidText)
	assert.Equal(t, conversion, stc.conversion)
	assert.Equal(t, err, stc.err)
}
// TestSnmpTranslateCache_hit verifies that a cached entry is returned verbatim
// (including its cached error) without consulting snmptranslate.
func TestSnmpTranslateCache_hit(t *testing.T) {
	snmpTranslateCaches = map[string]snmpTranslateCache{
		"foo": snmpTranslateCache{
			mibName:    "a",
			oidNum:     "b",
			oidText:    "c",
			conversion: "d",
			err:        fmt.Errorf("e"),
		},
	}
	mibName, oidNum, oidText, conversion, err := snmpTranslate("foo")
	assert.Equal(t, "a", mibName)
	assert.Equal(t, "b", oidNum)
	assert.Equal(t, "c", oidText)
	assert.Equal(t, "d", conversion)
	assert.Equal(t, fmt.Errorf("e"), err)
	snmpTranslateCaches = nil // don't leak the fake entry into other tests
}
// TestSnmpTableCache_miss verifies that a lookup of an uncached table OID
// populates the cache with exactly the values snmpTable returned.
func TestSnmpTableCache_miss(t *testing.T) {
	snmpTableCaches = nil // start from an empty cache
	oid := ".1.0.0.0"
	mibName, oidNum, oidText, fields, err := snmpTable(oid)
	assert.Len(t, snmpTableCaches, 1)
	stc := snmpTableCaches[oid]
	require.NotNil(t, stc)
	assert.Equal(t, mibName, stc.mibName)
	assert.Equal(t, oidNum, stc.oidNum)
	assert.Equal(t, oidText, stc.oidText)
	assert.Equal(t, fields, stc.fields)
	assert.Equal(t, err, stc.err)
}
// TestSnmpTableCache_hit verifies that a cached table entry is returned
// verbatim (including its cached error) without running the net-snmp tools.
func TestSnmpTableCache_hit(t *testing.T) {
	snmpTableCaches = map[string]snmpTableCache{
		"foo": snmpTableCache{
			mibName: "a",
			oidNum:  "b",
			oidText: "c",
			fields:  []Field{{Name: "d"}},
			err:     fmt.Errorf("e"),
		},
	}
	mibName, oidNum, oidText, fields, err := snmpTable("foo")
	assert.Equal(t, "a", mibName)
	assert.Equal(t, "b", oidNum)
	assert.Equal(t, "c", oidText)
	assert.Equal(t, []Field{{Name: "d"}}, fields)
	assert.Equal(t, fmt.Errorf("e"), err)
}
func TestError(t *testing.T) {
e := fmt.Errorf("nested error")
err := Errorf(e, "top error %d", 123)

View File

@@ -0,0 +1,92 @@
# USGS Telegraf plugin
This plugin gathers the recent earthquake data from the USGS and turns it into Telegraf metric format. The JSON polled from USGS looks as follows:
```json
{
"type": "FeatureCollection",
"metadata": {
"generated": 1481144380000,
"url": "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson",
"title": "USGS All Earthquakes, Past Hour",
"status": 200,
"api": "1.5.2",
"count": 4
},
"features": [
{
"type": "Feature",
"properties": {
"mag": 1.82,
"place": "15km ENE of Hawaiian Ocean View, Hawaii",
"time": 1481143731250,
"updated": 1481143943070,
"tz": -600,
"url": "http://earthquake.usgs.gov/earthquakes/eventpage/hv61510176",
"detail": "http://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/hv61510176.geojson",
"felt": null,
"cdi": null,
"mmi": null,
"alert": null,
"status": "automatic",
"tsunami": 0,
"sig": 51,
"net": "hv",
"code": "61510176",
"ids": ",hv61510176,",
"sources": ",hv,",
"types": ",general-link,geoserve,origin,phase-data,",
"nst": 32,
"dmin": 0.07161,
"rms": 0.24,
"gap": 106,
"magType": "md",
"type": "earthquake",
"title": "M 1.8 - 15km ENE of Hawaiian Ocean View, Hawaii"
},
"geometry": {
"type": "Point",
"coordinates": [
-155.6236725,
19.1058331,
0.87
]
},
"id": "hv61510176"
}
],
"bbox": [
-155.6236725,
19.1058331,
0.87,
-117.025,
64.9877,
13.47
]
}
```
Each `Feature` is then converted into a point in InfluxDB:
```yaml
measurement: "usgsdata"
tags:
- magnitude: 1.82,
- url: "http://earthquake.usgs.gov/earthquakes/eventpage/hv61510176",
- detail: "http://earthquake.usgs.gov/earthquakes/feed/v1.0/detail/hv61510176.geojson",
- felt: null,
- cdi: null,
- mmi: null,
- alert: null,
- status: "automatic",
- tsunami: 0,
- sig: 51,
- net: "hv",
- nst: 32,
- dmin: 0.07161,
- rms: 0.24,
- gap: 106,
- magType: "md",
- type: "earthquake",
- title: "M 1.8 - 15km ENE of Hawaiian Ocean View, Hawaii"
```

View File

171
plugins/inputs/usgs/usgs.go Normal file
View File

@@ -0,0 +1,171 @@
package usgs
import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"net/http"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/plugins/inputs"
)
// USGS is the top level struct for this plugin.
type USGS struct {
	// Ok is never read or written by the plugin code in this file;
	// presumably a placeholder for future configuration — TODO confirm.
	Ok bool
}
// Description returns a one-line description of the plugin's function.
func (gs *USGS) Description() string {
	return "a plugin to gather USGS earthquake data"
}
// SampleConfig returns a sample configuration for the plugin.
// It is empty because the plugin currently takes no configuration options.
func (gs *USGS) SampleConfig() string {
	return ""
}
// Gather polls the USGS "all earthquakes, past hour" GeoJSON feed and adds one
// point per reported event to the accumulator.
//
// Numeric/descriptive measurements go into fields; low-cardinality attributes
// (alert level, review status, event type, ...) go into tags. The point's
// timestamp is the more recent of the event's original and updated times
// (both millisecond epoch values).
func (gs *USGS) Gather(acc telegraf.Accumulator) error {
	resp, err := http.Get("https://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_hour.geojson")
	if err != nil {
		return err
	}
	// Always release the response body; without this every Gather run leaks
	// the connection.
	defer resp.Body.Close()
	// Fail fast on a non-200 response instead of trying to parse an error
	// page as GeoJSON.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("usgs feed returned status %s", resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	eqs := &Resp{}
	if err := json.Unmarshal(body, eqs); err != nil {
		return err
	}
	meas := "usgsdata"
	for _, feat := range eqs.Features {
		// GeoJSON positions are ordered [longitude, latitude, depth]
		// (RFC 7946); skip malformed features rather than panic on a
		// short coordinates slice.
		if len(feat.Geometry.Coordinates) < 3 {
			continue
		}
		fields := map[string]interface{}{
			// Event latitude (element 1 of the GeoJSON position)
			"lat": feat.Geometry.Coordinates[1],
			// Event longitude (element 0 of the GeoJSON position)
			"lng": feat.Geometry.Coordinates[0],
			// Event depth
			"depth": feat.Geometry.Coordinates[2],
			// Earthquake intensity: http://earthquake.usgs.gov/learn/topics/mag_vs_int.php
			"intensity": feat.Properties.Cdi,
			// Link to detail for this Feature
			"detail": feat.Properties.Detail,
			// Horizontal distance from the epicenter to the nearest station (in degrees). 1 degree is approximately 111.2 kilometers.
			"dmin": feat.Properties.Dmin,
			// The total number of felt reports submitted to the DYFI? system.
			"felt": feat.Properties.Felt,
			// The largest azimuthal gap between azimuthally adjacent stations (in degrees). In general, the smaller this number, the more reliable
			"gap": int(feat.Properties.Gap),
			// The magnitude for the event
			"magnitude": feat.Properties.Mag,
			// Method of magnitude calculation: https://earthquake.usgs.gov/data/comcat/data-eventterms.php#magType
			"magnitudeType": feat.Properties.MagType,
			// The maximum estimated instrumental intensity for the event.
			"maxIntensity": feat.Properties.Mmi,
			// Human readable place name
			"place": feat.Properties.Place,
			// A number describing how significant the event is. Larger numbers indicate a more significant event.
			"significance": int(feat.Properties.Sig),
			// Link to USGS Event Page for event.
			"usgsEventPage": feat.Properties.URL,
		}
		tags := map[string]string{
			// Whole-degree bucket tags; same [lon, lat, ...] ordering as above.
			"latInt": coordToString(feat.Geometry.Coordinates[1]),
			"lngInt": coordToString(feat.Geometry.Coordinates[0]),
			// Alert is "green", "yellow", "orange", "red"
			"alert": toString(feat.Properties.Alert),
			// The total number of seismic stations used to determine earthquake location.
			"numStations": toString(feat.Properties.Nst),
			// Indicates whether the event has been reviewed by a human -> "automatic", "reviewed", "deleted"
			"reviewStatus": toString(feat.Properties.Status),
			// This flag is set to "1" for large events in oceanic regions and "0" otherwise.
			"tsunami": toString(feat.Properties.Tsunami),
			// Type of seismic event: "earthquake", "quarry"
			"eventType": toString(feat.Properties.Type),
			// UTC offset for event Timezone
			"utcOffset": toString(feat.Properties.Tz),
		}
		// If the event has been more recently updated, use that as the timestamp.
		var t time.Time
		updated := feat.Properties.Updated
		original := feat.Properties.Time
		if updated > original {
			t = time.Unix(0, updated*int64(time.Millisecond))
		} else {
			t = time.Unix(0, original*int64(time.Millisecond))
		}
		acc.AddFields(meas, fields, tags, t)
	}
	return nil
}
// init registers the plugin with telegraf's input registry under the name
// "usgs", constructing a fresh USGS value per instantiation.
func init() {
	inputs.Add("usgs", func() telegraf.Input { return &USGS{} })
}
// toString renders an arbitrary value using fmt's default (%v) formatting.
func toString(v interface{}) string {
	return fmt.Sprint(v)
}
// coordToString rounds a coordinate down to the nearest whole degree
// (toward negative infinity) and formats it as a decimal integer string.
func coordToString(deg float64) string {
	whole := int(math.Floor(deg))
	return fmt.Sprintf("%d", whole)
}
// Resp is used to unmarshal the response body from USGS. The layout mirrors
// the GeoJSON FeatureCollection returned by the earthquake summary feed:
// feed-level metadata, a list of event features (properties + point geometry),
// and a bounding box.
type Resp struct {
	Type string `json:"type"`
	// Metadata describes the feed itself (not any single event).
	Metadata struct {
		Generated int64  `json:"generated"`
		URL       string `json:"url"`
		Title     string `json:"title"`
		Status    int    `json:"status"`
		API       string `json:"api"`
		Count     int    `json:"count"`
	} `json:"metadata"`
	// Features holds one entry per reported seismic event.
	Features []struct {
		Type string `json:"type"`
		// Properties carries the event's attributes; see
		// https://earthquake.usgs.gov/data/comcat/data-eventterms.php
		// for the meaning of the individual terms.
		Properties struct {
			Mag     float64 `json:"mag"`
			Place   string  `json:"place"`
			Time    int64   `json:"time"`    // event time, ms since epoch
			Updated int64   `json:"updated"` // last update time, ms since epoch
			Tz      float64 `json:"tz"`
			URL     string  `json:"url"`
			Detail  string  `json:"detail"`
			Felt    float64 `json:"felt"`
			Cdi     float64 `json:"cdi"`
			Mmi     float64 `json:"mmi"`
			Alert   string  `json:"alert"`
			Status  string  `json:"status"`
			Tsunami float64 `json:"tsunami"`
			Sig     float64 `json:"sig"`
			Net     string  `json:"net"`
			Code    string  `json:"code"`
			Ids     string  `json:"ids"`
			Sources string  `json:"sources"`
			Types   string  `json:"types"`
			Nst     float64 `json:"nst"`
			Dmin    float64 `json:"dmin"`
			Rms     float64 `json:"rms"`
			Gap     float64 `json:"gap"`
			MagType string  `json:"magType"`
			Type    string  `json:"type"`
			Title   string  `json:"title"`
		} `json:"properties"`
		// Geometry is a GeoJSON Point; Coordinates are ordered
		// [longitude, latitude, depth].
		Geometry struct {
			Type        string    `json:"type"`
			Coordinates []float64 `json:"coordinates"`
		} `json:"geometry"`
		ID string `json:"id"`
	} `json:"features"`
	Bbox []float64 `json:"bbox"`
}