From 771fbc311a8449446637da65237192f6f534b750 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 2 Oct 2017 15:44:55 -0700 Subject: [PATCH 01/95] Regenerate TLS certs due to expiration --- .../http_listener/http_listener_test.go | 187 +++++++----------- scripts/tls-certs.sh | 66 +++++++ 2 files changed, 132 insertions(+), 121 deletions(-) create mode 100644 scripts/tls-certs.sh diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener/http_listener_test.go index 97c36978a..2d4da8940 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener/http_listener_test.go @@ -34,137 +34,82 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 emptyMsg = "" serviceRootPEM = `-----BEGIN CERTIFICATE----- -MIIDRTCCAi2gAwIBAgIUenakcvMDj2URxBvUHBe0Mfhac0cwDQYJKoZIhvcNAQEL -BQAwGzEZMBcGA1UEAxMQdGVsZWdyYWYtdGVzdC1jYTAeFw0xNzA4MzEwNTE5NDNa -Fw0yNzA4MjkwNTIwMTNaMBsxGTAXBgNVBAMTEHRlbGVncmFmLXRlc3QtY2EwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxpDlUEC6LNXQMhvTtlWKUekwa -xh2OaiR16WvO8iA+sYmjlpFXOe+V6YWT+daOGujCqlGdrfDjj3C3pqFPJ6Q4VXaA -xQyd0Ena7kRtuQ/IUSpTWxyrpSIzKL3dAoV0NYpjFWznjVMP3Rq4l+4cHqviZSvK -bWUK5n0vBGpEw3A22V9urhlSNkSbECvzn9EFHyIeJX603zaKXYw5wiDwCp1swbXW -2WS2h45JeI5xrpKcFmLaqRNe0swi6bkGnmefyCv7nsbOLeKyEW9AExDSd6nSLdu9 -TGzhAfnfodcajSmKiQ+7YL9JY1bQ9hlfXk1ULg4riSEMKF+trZFZUanaXeeBAgMB -AAGjgYAwfjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUiPkCD8gEsSgIiV8jzACMoUZcHaIwHwYDVR0jBBgwFoAUiPkCD8gEsSgIiV8j -zACMoUZcHaIwGwYDVR0RBBQwEoIQdGVsZWdyYWYtdGVzdC1jYTANBgkqhkiG9w0B -AQsFAAOCAQEAXeadR7ZVkb2C0F8OEd2CQxVt2/JOqM4G2N2O8uTwf+hIn+qm+jbb -Q6JokGhr5Ybhvtv3U9JnI6RVI+TOYNkDzs5e2DtntFQmcKb2c+y5Z+OpvWd13ObK -GMCs4bho6O7h1qo1Z+Ftd6sYQ7JL0MuTGWCNbXv2c1iC4zPT54n1vGZC5so08RO0 -r7bqLLEnkSawabvSAeTxtweCXJUw3D576e0sb8oU0AP/Hn/2IC9E1vFZdjDswEfs -ARE4Oc5XnN6sqjtp0q5CqPpW6tYFwxdtZFk0VYPXyRnETVgry7Dc/iX6mktIYUx+ -qWSyPEDKALyxx6yUyVDqgcY2VUm0rM/1Iw== 
+MIIBxzCCATCgAwIBAgIJAOLq2g9+9TVgMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV +BAMMC1RlbGVncmFmIENBMB4XDTE3MTAwMjIyNDMwOFoXDTE3MTEwMTIyNDMwOFow +FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ +AoGBALHtGXLKZz3HUA4E1H0mR3gAtgNwUSRArxylCjQwO/7tFEYDFVCCPFzAF7G8 +hzHyBNgx5FwNrH3bMEol9iIxzoZNU0XTWS7DzN4S+89C2Tn+NaFko/SeFBMp4IK/ +55YAgcYGe2QbFnPITGYPT05VkbSBMD0PBITNSwsclGZGFVoHAgMBAAGjHTAbMAwG +A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBAIJpAA+X +QB57JhNxevUlFFLmGx7ASKrOeZLupzak4qUK718erafMAsXhydx1eKL/5Ne7ZcFa +Tf6dRPzCjv89WzYK/kJ59AgATkXNPvADRUKd0ViQw4Q4EcfuQrTMEym+gl1W2qQl +U9/eBDE341pcrfdHHGhS5LKv6KTmjyYmDLxl -----END CERTIFICATE-----` serviceCertPEM = `-----BEGIN CERTIFICATE----- -MIIDKjCCAhKgAwIBAgIUVYjQKruuFavlMZvV7X6RRF4OyBowDQYJKoZIhvcNAQEL -BQAwGzEZMBcGA1UEAxMQdGVsZWdyYWYtdGVzdC1jYTAeFw0xNzA4MzEwNTM3MjRa -Fw0xNzA5MzAwNTM3NTRaMBQxEjAQBgNVBAMTCWxvY2FsaG9zdDCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBANojLHm+4ttLfl8xo4orZ436/o36wdQ30sWz -xE8eGejhARvCSNIR1Tau41Towq/MQVQQejQJRgqBSz7UEfzJNJGKKKc560j6fmTM -FHpFNZcTrNrTb0r3blUWF1oswhTgg313OXbVsz+E9tHkT1p/s9uURy3TJ3O/CFHq -2vTiTQMTq31v0FEN1E/d6uzMhnGy5+QuRu/0A2iPpgXgPopYZwG5t4hN1KklM//l -j2gMlX6mAYalctFOkDbhIe4/4dQcfT0sWA49KInZmUeB1RdyiNfCoXnDRZHocPIj -ltYAK/Igda0fdlMisoqh2ZMrCt8yhws7ycc12cFi7ZMv8zvi5p8CAwEAAaNtMGsw -EwYDVR0lBAwwCgYIKwYBBQUHAwEwHQYDVR0OBBYEFCdE87Nz7vPpgRmj++6J8rQR -0F/TMB8GA1UdIwQYMBaAFIj5Ag/IBLEoCIlfI8wAjKFGXB2iMBQGA1UdEQQNMAuC -CWxvY2FsaG9zdDANBgkqhkiG9w0BAQsFAAOCAQEAIPhMYCsCPvOcvLLkahaZVn2g -ZbzPDplFhEsH1cpc7vd3GCV2EYjNTbBTDs5NlovSbJLf1DFB+gwsfEjhlFVZB3UQ -6GtuA5CQh/Skv8ngCDiLP50BbKF0CLa4Ia0xrSTAyRsg2rt9APphbej0yKqJ7j8U -1KK6rjOSnuzrKseex26VVovjPFq0FgkghWRm0xrAeizGTBCSEStZEPhk3pBo2x95 -a32VPpmhlQMDyiV6m1cc9/MfxMisnyeLqJl8E9nziNa4/BgwwN9DcOp63D9OOa6A -brtLz8OXqvV+7gKlq+nASFDimXwFKRyqRH6ECyHNTE2K14KZb7+JTa0AUm6Nlw== +MIIBzzCCATigAwIBAgIBATANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl +Z3JhZiBDQTAeFw0xNzEwMDIyMjQzMDhaFw0yNzA5MzAyMjQzMDhaMBQxEjAQBgNV 
+BAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAoI/8ceps +DvvA3KUDViYwZcB+RvfT6XCPCT35mEzuXWP42JHk1VPNA41215U8CGoJF7+OzRcZ +an3b2WLfAph+bi4Vmpe8eolmPHjf57jJ2fdDeLtMA4T0WF8yR4fHxrrU2UFsgXod +kpQNqa/R5+iEKNMQVQgD2HjP5BE1u+H6fscCAwEAAaMvMC0wCQYDVR0TBAIwADAL +BgNVHQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQELBQAD +gYEAV5vx8FHNlD6Z3e01/MiNSmYXn93CwlixYMRyW1Ri2P6hMtJiMRp59fNFzroa +iv6djr30uuKYOiAvdKhNaYWERgrtjGVEuPoIMQfaAaKHQj6CKLBXeGZ5Gxhy+M6G +OE6g0E4ufHOqr1h1GDIiAq88zyJ2AupgLLUCMFtkq0v0mr0= -----END CERTIFICATE-----` serviceKeyPEM = `-----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEA2iMseb7i20t+XzGjiitnjfr+jfrB1DfSxbPETx4Z6OEBG8JI -0hHVNq7jVOjCr8xBVBB6NAlGCoFLPtQR/Mk0kYoopznrSPp+ZMwUekU1lxOs2tNv -SvduVRYXWizCFOCDfXc5dtWzP4T20eRPWn+z25RHLdMnc78IUera9OJNAxOrfW/Q -UQ3UT93q7MyGcbLn5C5G7/QDaI+mBeA+ilhnAbm3iE3UqSUz/+WPaAyVfqYBhqVy -0U6QNuEh7j/h1Bx9PSxYDj0oidmZR4HVF3KI18KhecNFkehw8iOW1gAr8iB1rR92 -UyKyiqHZkysK3zKHCzvJxzXZwWLtky/zO+LmnwIDAQABAoIBABD8MidcrK9kpndl -FxXYIV0V0SJfBx6uJhRM1hlO/7d5ZauyqhbpWo/CeGMRKK+lmOShz9Ijcre4r5I5 -0xi61gQLHPVAdkidcKAKoAGRSAX2ezwiwIS21Xl8md7ko0wa20I2uVu+chGdGdbo -DyG91dRgLFauHWFO26f9QIVW5aY6ifyjg1fyxR/9n2YZfkqbjvASW4Mmfv5GR1aT -mffajgsquy78PKs86f879iG+cfCzPYdoK+h7fsm4EEqDwK8JCsUIY1qN+Tuj5RQY -zuIuD34+wywe7Jd1vwjQ40Cyilgtnu8Q8s8J05bXrD3mqer5nrqIGOX0vKgs+EXx -1hV+6ZECgYEA+950L2u8oPzNXu9BAL8Y5Tl384qj1+Cj/g28MuZFoPf/KU0HRN6l -PBlXKaGP9iX+749tdiNPk5keIwOL8xCVXOpMLOA/jOlGODydG9rX67WCL/R1RcJR -+Pip8dxO1ZNpOKHud06XLMuuVz9qNq0Xwb1VCzNTOxnEDwtXNyDm6OkCgYEA3bcW -hMeDNn85UA4n0ljcdCmEu12WS7L//jaAOWuPPfM3GgKEIic6hqrPWEuZlZtQnybx -L6qQgaWyCfl/8z0+5WynQqkVPz1j1dSrSKnemxyeySrmUcOH5UJfAKaG5PUd7H3t -oPTCxkbW3Bi2QLlgd4nb7+OEk6w0V9Zzv4AFHkcCgYBL/aD2Ub4WoE9iLjNhg0aC -mmUrcI/gaSFxXDmE7d7iIxC0KE5iI/6cdFTM9bbWoD4bjx2KgDrZIGBsVfyaeE1o -PDSBcaMa46LRAtCv/8YXkqrVxx6+zlMnF/dGRp7uZ0xeztSA4JBR7p4KKtLj7jN1 -u6b1+yVIdoylsVk+A8pHSQKBgQCUcsn5DTyleHl/SHsRM74naMUeToMbHDaalxMz -XvkBmZ8DIzwlQe7FzAgYLkYfDWblqMVEDQfERpT2aL9qtU8vfZhf4aYAObJmsYYd 
-mN8bLAaE2txrUmfi8JV7cgRPuG7YsVgxtK/U4glqRIGCxJv6bat86vERjvNc/JFz -XtwOcQKBgF83Ov+EA9pL0AFs+BMiO+0SX/OqLX0TDMSqUKg3jjVfgl+BSBEZIsOu -g5jqHBx3Om/UyrXdn+lnMhyEgCuNkeC6057B5iGcWucTlnINeejXk/pnbvMtGjD1 -OGWmdXhgLtKg6Edqm+9fnH0UJN6DRxRRCUfzMfbY8TRmLzZG2W34 +MIICXAIBAAKBgQCgj/xx6mwO+8DcpQNWJjBlwH5G99PpcI8JPfmYTO5dY/jYkeTV +U80DjXbXlTwIagkXv47NFxlqfdvZYt8CmH5uLhWal7x6iWY8eN/nuMnZ90N4u0wD +hPRYXzJHh8fGutTZQWyBeh2SlA2pr9Hn6IQo0xBVCAPYeM/kETW74fp+xwIDAQAB +AoGABiRb6NOp3Ize3NHnJcWCNnI9omNalOR8ZEMdqCjROXtYiphSI6L4BbnEoQyR +ZlUAEgt+3/ORQlScM12n4EaLF4Zi4CTGmibRHUff/ybUDGMg2Lp/AL/ghP/3U37l +C/oRjohK9Rqn28hf8xgL9Jz+KbQaVv5f+frLwL3EKreYtOkCQQDLe1s89rbxvTZr +PhtwYrnXC8KbBNPIzJbTXrphqr0H3xuDlTpd+4tvIlL6LoqANYXAmHHlKUuPcar6 +QCj9xNwTAkEAygDRac8qewqIWhZOs0u8phC37dxzwVXslrgjO+kTLxN/Q1srK45T +gHDbJuCrBPkYrjAXWHd2rIkOWl0rk38A/QJADct4HQLw1iSous6EF7Npu+19LPs/ +zF4qX3wNkK99jzoN6HbGdTandkpSa8mZ9CUswyjSl+Gb0Ma4+6w72zBsZwJBAKn+ +Zj0VCjrhcj3d5/0bD3bxOtgBXaimFqP/8ibIzkwfrEmSv5G4BK1iTAs7prBYsFxm +PD9GyagI7vs8zR8jEkECQD51jhM8DDPah/ECC31we54Y9dqBOupy1a8y6os1YFkv +BV7zTVrpOzwUsrkMW+wFyQSX9eyyMfJHJihlobXA+QY= -----END RSA PRIVATE KEY-----` clientRootPEM = `-----BEGIN CERTIFICATE----- -MIIDRTCCAi2gAwIBAgIUenakcvMDj2URxBvUHBe0Mfhac0cwDQYJKoZIhvcNAQEL -BQAwGzEZMBcGA1UEAxMQdGVsZWdyYWYtdGVzdC1jYTAeFw0xNzA4MzEwNTE5NDNa -Fw0yNzA4MjkwNTIwMTNaMBsxGTAXBgNVBAMTEHRlbGVncmFmLXRlc3QtY2EwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxpDlUEC6LNXQMhvTtlWKUekwa -xh2OaiR16WvO8iA+sYmjlpFXOe+V6YWT+daOGujCqlGdrfDjj3C3pqFPJ6Q4VXaA -xQyd0Ena7kRtuQ/IUSpTWxyrpSIzKL3dAoV0NYpjFWznjVMP3Rq4l+4cHqviZSvK -bWUK5n0vBGpEw3A22V9urhlSNkSbECvzn9EFHyIeJX603zaKXYw5wiDwCp1swbXW -2WS2h45JeI5xrpKcFmLaqRNe0swi6bkGnmefyCv7nsbOLeKyEW9AExDSd6nSLdu9 -TGzhAfnfodcajSmKiQ+7YL9JY1bQ9hlfXk1ULg4riSEMKF+trZFZUanaXeeBAgMB -AAGjgYAwfjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUiPkCD8gEsSgIiV8jzACMoUZcHaIwHwYDVR0jBBgwFoAUiPkCD8gEsSgIiV8j -zACMoUZcHaIwGwYDVR0RBBQwEoIQdGVsZWdyYWYtdGVzdC1jYTANBgkqhkiG9w0B 
-AQsFAAOCAQEAXeadR7ZVkb2C0F8OEd2CQxVt2/JOqM4G2N2O8uTwf+hIn+qm+jbb -Q6JokGhr5Ybhvtv3U9JnI6RVI+TOYNkDzs5e2DtntFQmcKb2c+y5Z+OpvWd13ObK -GMCs4bho6O7h1qo1Z+Ftd6sYQ7JL0MuTGWCNbXv2c1iC4zPT54n1vGZC5so08RO0 -r7bqLLEnkSawabvSAeTxtweCXJUw3D576e0sb8oU0AP/Hn/2IC9E1vFZdjDswEfs -ARE4Oc5XnN6sqjtp0q5CqPpW6tYFwxdtZFk0VYPXyRnETVgry7Dc/iX6mktIYUx+ -qWSyPEDKALyxx6yUyVDqgcY2VUm0rM/1Iw== +MIIBxzCCATCgAwIBAgIJAOLq2g9+9TVgMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV +BAMMC1RlbGVncmFmIENBMB4XDTE3MTAwMjIyNDMwOFoXDTE3MTEwMTIyNDMwOFow +FjEUMBIGA1UEAwwLVGVsZWdyYWYgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJ +AoGBALHtGXLKZz3HUA4E1H0mR3gAtgNwUSRArxylCjQwO/7tFEYDFVCCPFzAF7G8 +hzHyBNgx5FwNrH3bMEol9iIxzoZNU0XTWS7DzN4S+89C2Tn+NaFko/SeFBMp4IK/ +55YAgcYGe2QbFnPITGYPT05VkbSBMD0PBITNSwsclGZGFVoHAgMBAAGjHTAbMAwG +A1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4GBAIJpAA+X +QB57JhNxevUlFFLmGx7ASKrOeZLupzak4qUK718erafMAsXhydx1eKL/5Ne7ZcFa +Tf6dRPzCjv89WzYK/kJ59AgATkXNPvADRUKd0ViQw4Q4EcfuQrTMEym+gl1W2qQl +U9/eBDE341pcrfdHHGhS5LKv6KTmjyYmDLxl -----END CERTIFICATE-----` clientCertPEM = `-----BEGIN CERTIFICATE----- -MIIDMDCCAhigAwIBAgIUIVOF5g2zH6+J/dbGdu4q18aSJoMwDQYJKoZIhvcNAQEL -BQAwGzEZMBcGA1UEAxMQdGVsZWdyYWYtdGVzdC1jYTAeFw0xNzA4MzEwNTQ1MzJa -Fw0yNzA4MjUwMTQ2MDJaMBcxFTATBgNVBAMTDGR1bW15LWNsaWVudDCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAKok1HJ40buyjrS+DG9ORLzrWIJad2y/ -6X2Bg9MSENfpEUgaS7nK2ML3m1e2poHqBSR+V8VECNs+MDCLSOeQ4FC1TdBKMLfw -NxW88y5Gj6rTRcAXl092ba7stwbqJPBAZu1Eh1jXIp5nrFKh8Jq7kRxmMB5vC70V -fOSPS0RZtEd7D+QZ6jgkFJWsZzn4gJr8nc/kmLcntLw+g/tz9/8lfaV306tLlhMH -dv3Ka6Nt86j6/muOwvoeAkAnCEFAgDcXg4F37PFAiEHRw9DyTeWDuZqvnMZ3gosL -kl15QhnP0yG2QCjSb1gaLcKB42cyxDnPc31WsVuuzQnajazcVf3lJW0CAwEAAaNw -MG4wEwYDVR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFCemMO+Qlj+YCLQ3ScAQ -8XYJJJ5ZMB8GA1UdIwQYMBaAFIj5Ag/IBLEoCIlfI8wAjKFGXB2iMBcGA1UdEQQQ -MA6CDGR1bW15LWNsaWVudDANBgkqhkiG9w0BAQsFAAOCAQEARThbApKvvGDp7uSc -mINaqDOHe69F9PepV0/3+B5+X1b3yd2sbzZL/ZoHl27kajSHVrUF+09gcTosfuY3 -omnIPw+NseqTJG+qTMRb3AarLNO46EJZLOowAEhnJyVmhK5uU0YqhV1X9eN+g4/o 
-BuyOPvHj6UJWviZFy6fDIj2N+ygN/CNP5X3iLDBUoyCEHAehLiQr0aRgsqe4JLlS -P+0l0btTUpcqUhsQy+sD2lv3MO1tZ/P4zhzu0J0LUeLBDdOPf/FIvTgkCNxN9GGy -SLmeBeCzsKmWbzE3Yuahw3h4IblVyyGc7ZDGIobDrZgFqshcZylU8wrsjUnjNSPA -G+LOWQ== +MIIBzjCCATegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDDAtUZWxl +Z3JhZiBDQTAeFw0xNzEwMDIyMjQzMDhaFw0yNzA5MzAyMjQzMDhaMBMxETAPBgNV +BAMMCHRlbGVncmFmMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDIrPGv8Sm1 +6tI+vlATzWGOK1D40iNTiGj4FpcS2Tm4SdaDSfa3VL9N5l8aeuN4E8O2YXK3QcR8 +NoeY87cWW06PtFc/ByS42VeWDKt28/DpGzbrzCVNOumS3X5QEyySYLpi0uqI9ZZ5 +O2sOJ2yVua8F3cwqPTveVmU3LeQfVrh7QwIDAQABoy8wLTAJBgNVHRMEAjAAMAsG +A1UdDwQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjANBgkqhkiG9w0BAQsFAAOB +gQAVEfHePY9fumW8rkbbSbiuQ1dGIINbMGPO17eAjOxMT4Z1jDb8oTVHbaZM0rKo +wKx4dDp5mnLK+NuMZ1sNxKOf6IMmQ022ANOYM0dkwfg13bpC3BGW8Z7nOFK0xXh6 +4KTcXktBUtubmn6w7szvWY2OajPVoiGgcapwwhCrBEa6rg== -----END CERTIFICATE-----` clientKeyPEM = `-----BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAqiTUcnjRu7KOtL4Mb05EvOtYglp3bL/pfYGD0xIQ1+kRSBpL -ucrYwvebV7amgeoFJH5XxUQI2z4wMItI55DgULVN0Eowt/A3FbzzLkaPqtNFwBeX -T3Ztruy3Buok8EBm7USHWNcinmesUqHwmruRHGYwHm8LvRV85I9LRFm0R3sP5Bnq -OCQUlaxnOfiAmvydz+SYtye0vD6D+3P3/yV9pXfTq0uWEwd2/cpro23zqPr+a47C -+h4CQCcIQUCANxeDgXfs8UCIQdHD0PJN5YO5mq+cxneCiwuSXXlCGc/TIbZAKNJv -WBotwoHjZzLEOc9zfVaxW67NCdqNrNxV/eUlbQIDAQABAoIBAAXZYEhTKPqn58oE -4o6NBUXtXUyV6ZcefdtnsW13KIcTpxlwdfv8IjmJo5h/WfgLYIPhqAjLDvbii2uP -zkDPtTZxFSy88DHSm0IvDbkgid3Yh4RUC0qbCqhB0QT21bBAtokfmvuN4c3KSJ1K -nefj3Ng6Fxtku+WTMIj2+CJwZwcyAH47ZUngYs/77gA0hAJcbdL/bj8Bpmd+lH6C -Ci22T2hrw+cpWMN6qwa3wxWIneCaqxkylSgpUzSNE0QO3mXkX+NYtL2BQ0w+wPqq -lww3QJOFAX1qRLflglL9K+ruTQofm49vxv6apsoqdkrxEBoPzkljlqiPRmzUxau4 -cvbApQECgYEAy5m5O3mQt6DBrDRJWSwpZh6DRNd5USEqXOIFtp+Qze2Jx1pYQfZt -NOXOrwy04o0+6yLzc4O4W5ta2KfTlALFzCa6Na3Ca4ZUAeteWprrdh8b1b2w/wUH -E3uQFkvH0zFdPsA3pTTZ0k/ydmHnu4zZqBnSeh0dIW8xFYgZZCgQusECgYEA1e7O -ujCUa8y49sY42D/Y/c8B96xVfJZO5hhY7eLgkzqUlmFl31Ld7AjlJcXpbMeW1vaa -0Mxbfx2qAVaZEkvdnXq3V8spe6qOGBdlKzey4DMEfmEXLFp5DRYCSwpXiqDZcGqc 
-jwI58wuzKoDgydN9bLdF8XYGtQXnHIE9WyTYMa0CgYBKYSBgb+rEir/2LyvUneOJ -4P/HuIgjcWBOimvX6bc2495/q6uufV4sAwBcxuGWGk+wCxaxTp+dJ8YqfDU5T0H/ -cO56Cb6LFYm/IcNYilwWzQqYLTJqF+Yb4fojiw+3QcN01zf87K/eu0IyqVXFGJGz -bauM3PH1cu+VlCDijBiAgQKBgDOQ9YmRriTx2t+41fjiIvbC0BGYG58FSA1UbxMg -LcuvQiOhZIHZIp8DYeCh/Or4jRZRqO2NZLyWNOVPr2Pmn4uXCdyCnwQtD0UlVoB9 -U4ORKJMh6gkJ4cXSuUjHPGSw8tiTChu6iKdZ+ZzUJdrgPIpY/uX98g3uV0/aoyR2 -FBqdAoGAQIrcOsTpCe6l3ZDtQyNIeAj1s7sZyW8MBy95RXw3y/yzVEOAu4yWNobj -RReeHQEsrQq+sJ/cols8HfoOpGpL3U0IGDi5vr1JlOXmBhFX2xuFrfh3jvgXlUqb -fqxPcT3d7I/UEi0ueDh3osyTn46mDfRfF7HBLBNeyQbIFWBDDus= +MIICXgIBAAKBgQDIrPGv8Sm16tI+vlATzWGOK1D40iNTiGj4FpcS2Tm4SdaDSfa3 +VL9N5l8aeuN4E8O2YXK3QcR8NoeY87cWW06PtFc/ByS42VeWDKt28/DpGzbrzCVN +OumS3X5QEyySYLpi0uqI9ZZ5O2sOJ2yVua8F3cwqPTveVmU3LeQfVrh7QwIDAQAB +AoGAHtvpdqLhRSZNGnTtn33vyIsEsp6t7ASID855gN6Cr8I7CIlxNRQFLxeD/HB1 +VlvDtuIZX/DvJCLGi1C/EOMNm2nY7IT2gZgMpxvmfjfGhHKT1MWYu9cdyiOOacqD +yRDAcKpubIPEIV3aczglv9sVApXwZcgePzDwweTVfP/Nv5ECQQDthIv5Y5k3UO8h +Hht+27W8McFJ5eiF5OcLGOQ4nKGHkCOskfD4u/i+j+4dUeGBdpT8CzszgofBa6wh +dJevQerVAkEA2Ep8PUfXRjel8NiLNL8iK/SR26y8wPduKam31SMUPq71+GROKkFz +yYYAbKORs+fS6LBT+M48cEu470o+g8eptwJBALzCEMeSOqp2XIRSPAG2NBiq5fSH +jSIThvYPwxemisyEZYV4uivCnu06zz5n2zIa/k3L0zGdc6vomPRBh2aVmT0CQQCY +/B5ibfUbqnLKJzBXb7Xo50Vf3w9nYdvexjfMHtLL/47lUXVkOAWBDjIwpYWCfb/V +bBsJCj7/ot+9CYOsTEaDAkEA4XAGFxx78JMVuJLjevkf0pGUPEocdoOAvpYWT5sR +9FODrPEtW84ZevSmuByjzeqVzS3ElIxACopRJgSN20d9vg== -----END RSA PRIVATE KEY-----` ) diff --git a/scripts/tls-certs.sh b/scripts/tls-certs.sh new file mode 100644 index 000000000..39e8cf359 --- /dev/null +++ b/scripts/tls-certs.sh @@ -0,0 +1,66 @@ +#!/bin/sh + +mkdir certs certs_by_serial private && +chmod 700 private && +echo 01 > ./serial && +touch ./index.txt && +cat >./openssl.conf < Date: Mon, 2 Oct 2017 17:15:34 -0700 Subject: [PATCH 02/95] Fix case sensitivity error in sqlserver input (#3287) --- plugins/inputs/sqlserver/sqlserver.go | 37 ++++++++++++++------------- 1 file changed, 19 insertions(+), 18 
deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 2bc33a54a..373e12f7c 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -2,11 +2,12 @@ package sqlserver import ( "database/sql" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "sync" "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + // go-mssqldb initialization _ "github.com/zensqlmonitor/go-mssqldb" ) @@ -1436,16 +1437,16 @@ SELECT , type = 'Wait stats' ---- values , [I/O] = SUM([I/O]) -, [Latch] = SUM([Latch]) -, [Lock] = SUM([Lock]) -, [Network] = SUM([Network]) -, [Service broker] = SUM([Service broker]) -, [Memory] = SUM([Memory]) -, [Buffer] = SUM([Buffer]) +, [Latch] = SUM([LATCH]) +, [Lock] = SUM([LOCK]) +, [Network] = SUM([NETWORK]) +, [Service broker] = SUM([SERVICE BROKER]) +, [Memory] = SUM([MEMORY]) +, [Buffer] = SUM([BUFFER]) , [CLR] = SUM([CLR]) , [SQLOS] = SUM([SQLOS]) -, [XEvent] = SUM([XEvent]) -, [Other] = SUM([Other]) +, [XEvent] = SUM([XEVENT]) +, [Other] = SUM([OTHER]) , [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER]) FROM ( @@ -1479,16 +1480,16 @@ SELECT , type = 'Wait stats' ---- values , [I/O] = SUM([I/O]) -, [Latch] = SUM([Latch]) -, [Lock] = SUM([Lock]) -, [Network] = SUM([Network]) -, [Service broker] = SUM([Service broker]) -, [Memory] = SUM([Memory]) -, [Buffer] = SUM([Buffer]) +, [Latch] = SUM([LATCH]) +, [Lock] = SUM([LOCK]) +, [Network] = SUM([NETWORK]) +, [Service broker] = SUM([SERVICE BROKER]) +, [Memory] = SUM([MEMORY]) +, [Buffer] = SUM([BUFFER]) , [CLR] = SUM([CLR]) , [SQLOS] = SUM([SQLOS]) -, [XEvent] = SUM([XEvent]) -, [Other] = SUM([Other]) +, [XEvent] = SUM([XEVENT]) +, [Other] = SUM([OTHER]) , [Total] = SUM([I/O]+[LATCH]+[LOCK]+[NETWORK]+[SERVICE BROKER]+[MEMORY]+[BUFFER]+[CLR]+[XEVENT]+[SQLOS]+[OTHER]) FROM ( From 
f67350107d71ce6465aeeeda4e38ac9071a3b8b5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 2 Oct 2017 17:16:38 -0700 Subject: [PATCH 03/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 41a97ed7b..432c07b95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#3265](https://github.com/influxdata/telegraf/issues/3265): Fix parsing of JSON with a UTF8 BOM in httpjson. - [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics. - [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer. +- [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input. ## v1.4.1 [2017-09-26] From aba269e94c887f8259e8bec6ed00afab04df9f18 Mon Sep 17 00:00:00 2001 From: Jimena Cabrera Notari Date: Tue, 3 Oct 2017 01:38:51 +0100 Subject: [PATCH 04/95] Add extra wired tiger cache metrics to mongodb input (#3281) --- plugins/inputs/mongodb/mongodb_data.go | 21 ++++++++-- plugins/inputs/mongodb/mongodb_data_test.go | 18 +++++++-- plugins/inputs/mongodb/mongostat.go | 43 +++++++++++++++++++-- 3 files changed, 72 insertions(+), 10 deletions(-) diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 47f35f199..36eabc90c 100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -77,6 +77,21 @@ var WiredTigerStats = map[string]string{ "percent_cache_used": "CacheUsedPercent", } +var WiredTigerExtStats = map[string]string{ + "wtcache_tracked_dirty_bytes": "TrackedDirtyBytes", + "wtcache_current_bytes": "CurrentCachedBytes", + "wtcache_max_bytes_configured": "MaxBytesConfigured", + "wtcache_app_threads_page_read_count": "AppThreadsPageReadCount", + "wtcache_app_threads_page_read_time": "AppThreadsPageReadTime", + "wtcache_app_threads_page_write_count": "AppThreadsPageWriteCount", 
+ "wtcache_bytes_written_from": "BytesWrittenFrom", + "wtcache_bytes_read_into": "BytesReadInto", + "wtcache_pages_evicted_by_app_thread": "PagesEvictedByAppThread", + "wtcache_pages_queued_for_eviction": "PagesQueuedForEviction", + "wtcache_server_evicting_pages": "ServerEvictingPages", + "wtcache_worker_thread_evictingpages": "WorkerThreadEvictingPages", +} + var DbDataStats = map[string]string{ "collections": "Collections", "objects": "Objects", @@ -121,13 +136,11 @@ func (d *MongodbData) AddDefaultStats() { floatVal, _ := strconv.ParseFloat(percentVal, 64) d.add(key, floatVal) } + d.addStat(statLine, WiredTigerExtStats) } } -func (d *MongodbData) addStat( - statLine reflect.Value, - stats map[string]string, -) { +func (d *MongodbData) addStat(statLine reflect.Value, stats map[string]string) { for key, value := range stats { val := statLine.FieldByName(value).Interface() d.add(key, val) diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index e2ecac49b..9082061d3 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -70,9 +70,21 @@ func TestAddReplStats(t *testing.T) { func TestAddWiredTigerStats(t *testing.T) { d := NewMongodbData( &StatLine{ - StorageEngine: "wiredTiger", - CacheDirtyPercent: 0, - CacheUsedPercent: 0, + StorageEngine: "wiredTiger", + CacheDirtyPercent: 0, + CacheUsedPercent: 0, + TrackedDirtyBytes: 0, + CurrentCachedBytes: 0, + MaxBytesConfigured: 0, + AppThreadsPageReadCount: 0, + AppThreadsPageReadTime: 0, + AppThreadsPageWriteCount: 0, + BytesWrittenFrom: 0, + BytesReadInto: 0, + PagesEvictedByAppThread: 0, + PagesQueuedForEviction: 0, + ServerEvictingPages: 0, + WorkerThreadEvictingPages: 0, }, tags, ) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index bddd3fda9..cb7710798 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -127,9 +127,19 @@ type 
ConcurrentTransStats struct { // CacheStats stores cache statistics for WiredTiger. type CacheStats struct { - TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"` - CurrentCachedBytes int64 `bson:"bytes currently in the cache"` - MaxBytesConfigured int64 `bson:"maximum bytes configured"` + TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"` + CurrentCachedBytes int64 `bson:"bytes currently in the cache"` + MaxBytesConfigured int64 `bson:"maximum bytes configured"` + AppThreadsPageReadCount int64 `bson:"application threads page read from disk to cache count"` + AppThreadsPageReadTime int64 `bson:"application threads page read from disk to cache time (usecs)"` + AppThreadsPageWriteCount int64 `bson:"application threads page write from cache to disk count"` + AppThreadsPageWriteTime int64 `bson:"application threads page write from cache to disk time (usecs)"` + BytesWrittenFrom int64 `bson:"bytes written from cache"` + BytesReadInto int64 `bson:"bytes read into cache"` + PagesEvictedByAppThread int64 `bson:"pages evicted by application threads"` + PagesQueuedForEviction int64 `bson:"pages queued for eviction"` + ServerEvictingPages int64 `bson:"eviction server evicting pages"` + WorkerThreadEvictingPages int64 `bson:"eviction worker thread evicting pages"` } // TransactionStats stores transaction checkpoints in WiredTiger. 
@@ -406,6 +416,20 @@ type StatLine struct { CacheDirtyPercent float64 CacheUsedPercent float64 + // Cache ultilization extended (wiredtiger only) + TrackedDirtyBytes int64 + CurrentCachedBytes int64 + MaxBytesConfigured int64 + AppThreadsPageReadCount int64 + AppThreadsPageReadTime int64 + AppThreadsPageWriteCount int64 + BytesWrittenFrom int64 + BytesReadInto int64 + PagesEvictedByAppThread int64 + PagesQueuedForEviction int64 + ServerEvictingPages int64 + WorkerThreadEvictingPages int64 + // Replicated Opcounter fields InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64 ReplLag int64 @@ -534,6 +558,19 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.Flushes = newStat.WiredTiger.Transaction.TransCheckpoints - oldStat.WiredTiger.Transaction.TransCheckpoints returnVal.CacheDirtyPercent = float64(newStat.WiredTiger.Cache.TrackedDirtyBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) returnVal.CacheUsedPercent = float64(newStat.WiredTiger.Cache.CurrentCachedBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) + + returnVal.TrackedDirtyBytes = newStat.WiredTiger.Cache.TrackedDirtyBytes + returnVal.CurrentCachedBytes = newStat.WiredTiger.Cache.CurrentCachedBytes + returnVal.MaxBytesConfigured = newStat.WiredTiger.Cache.MaxBytesConfigured + returnVal.AppThreadsPageReadCount = newStat.WiredTiger.Cache.AppThreadsPageReadCount + returnVal.AppThreadsPageReadTime = newStat.WiredTiger.Cache.AppThreadsPageReadTime + returnVal.AppThreadsPageWriteCount = newStat.WiredTiger.Cache.AppThreadsPageWriteCount + returnVal.BytesWrittenFrom = newStat.WiredTiger.Cache.BytesWrittenFrom + returnVal.BytesReadInto = newStat.WiredTiger.Cache.BytesReadInto + returnVal.PagesEvictedByAppThread = newStat.WiredTiger.Cache.PagesEvictedByAppThread + returnVal.PagesQueuedForEviction = newStat.WiredTiger.Cache.PagesQueuedForEviction + returnVal.ServerEvictingPages = newStat.WiredTiger.Cache.ServerEvictingPages + 
returnVal.WorkerThreadEvictingPages = newStat.WiredTiger.Cache.WorkerThreadEvictingPages } else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil { returnVal.Flushes = newStat.BackgroundFlushing.Flushes - oldStat.BackgroundFlushing.Flushes } From 0a55ab42b4aff5051c7cd0e52930872621eba170 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 2 Oct 2017 17:39:32 -0700 Subject: [PATCH 05/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 432c07b95..464a31f68 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ - [#3106](https://github.com/influxdata/telegraf/pull/3106): Add configurable separator for metrics and fields in opentsdb output. - [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event. - [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin. +- [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input. ### Bugfixes From 79f66dc5b339ff21883045219aef5ae5d5cef2f0 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Mon, 2 Oct 2017 20:42:21 -0400 Subject: [PATCH 06/95] Added newline to each metric line in wavefront output (#3290) --- plugins/outputs/wavefront/wavefront.go | 4 +++- plugins/outputs/wavefront/wavefront_test.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 773e970bc..85a73e319 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -124,7 +124,7 @@ func (w *Wavefront) Write(metrics []telegraf.Metric) error { for _, m := range metrics { for _, metricPoint := range buildMetrics(m, w) { metricLine := formatMetricPoint(metricPoint, w) - //log.Printf("D! Output [wavefront] %s", metricLine) + log.Printf("D! 
Output [wavefront] %s", metricLine) _, err := connection.Write([]byte(metricLine)) if err != nil { return fmt.Errorf("Wavefront: TCP writing error %s", err.Error()) @@ -262,6 +262,8 @@ func formatMetricPoint(metricPoint *MetricPoint, w *Wavefront) string { buffer.WriteString("\"") } + buffer.WriteString("\n") + return buffer.String() } diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index 1dd4d7078..f1722e668 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -275,7 +275,7 @@ func TestFormatMetricPoint(t *testing.T) { Tags: map[string]string{"sp*c!@l\"-ch/rs": "sp*c!@l/ val\"ue"}, } - expected := "test.metric.something 123.456000 1257894000 source=\"testSource\" sp-c--l--ch-rs=\"sp-c!@l/ val\\\"ue\"" + expected := "test.metric.something 123.456000 1257894000 source=\"testSource\" sp-c--l--ch-rs=\"sp-c!@l/ val\\\"ue\"\n" received := formatMetricPoint(testpoint, w) From b610276485a15a3342e95c5435e17fb7751746da Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Oct 2017 10:54:31 -0700 Subject: [PATCH 07/95] Skip invalid urls in nginx input --- plugins/inputs/nginx/nginx.go | 1 + plugins/inputs/nginx_plus/nginx_plus.go | 1 + 2 files changed, 2 insertions(+) diff --git a/plugins/inputs/nginx/nginx.go b/plugins/inputs/nginx/nginx.go index d389997e2..3880dd91d 100644 --- a/plugins/inputs/nginx/nginx.go +++ b/plugins/inputs/nginx/nginx.go @@ -72,6 +72,7 @@ func (n *Nginx) Gather(acc telegraf.Accumulator) error { addr, err := url.Parse(u) if err != nil { acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue } wg.Add(1) diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 5a761071e..089ba7d93 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -59,6 +59,7 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { addr, err := 
url.Parse(u) if err != nil { acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue } wg.Add(1) From dd4299e9250c5c56b92e257540704896daff4925 Mon Sep 17 00:00:00 2001 From: Aditya C S Date: Wed, 4 Oct 2017 03:06:26 +0530 Subject: [PATCH 08/95] Collect Docker Swarm service metrics in docker input plugin (#3141) --- plugins/inputs/docker/client.go | 13 ++++ plugins/inputs/docker/docker.go | 83 ++++++++++++++++++++++++ plugins/inputs/docker/docker_test.go | 82 +++++++++++++++++++++++ plugins/inputs/docker/docker_testdata.go | 74 +++++++++++++++++++++ 4 files changed, 252 insertions(+) diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index e918231a0..a021b59c4 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -6,6 +6,7 @@ import ( "net/http" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" docker "github.com/docker/docker/client" "github.com/docker/go-connections/sockets" ) @@ -20,6 +21,9 @@ type Client interface { ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) } func NewEnvClient() (Client, error) { @@ -65,3 +69,12 @@ func (c *SocketClient) ContainerStats(ctx context.Context, containerID string, s func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { return c.client.ContainerInspect(ctx, containerID) } +func (c *SocketClient) ServiceList(ctx context.Context, options types.ServiceListOptions) 
([]swarm.Service, error) { + return c.client.ServiceList(ctx, options) +} +func (c *SocketClient) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + return c.client.TaskList(ctx, options) +} +func (c *SocketClient) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + return c.client.NodeList(ctx, options) +} diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 3634e596f..171097621 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "regexp" "strconv" @@ -14,6 +15,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" @@ -35,6 +37,8 @@ type Docker struct { Endpoint string ContainerNames []string + GatherServices bool `toml:"gather_services"` + Timeout internal.Duration PerDevice bool `toml:"perdevice"` Total bool `toml:"total"` @@ -82,6 +86,9 @@ var sampleConfig = ` ## To use environment variables (ie, docker-machine), set endpoint = "ENV" endpoint = "unix:///var/run/docker.sock" + ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) + gather_services = false + ## Only collect metrics for these containers, collect all if empty container_names = [] @@ -160,6 +167,13 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { acc.AddError(err) } + if d.GatherServices { + err := d.gatherSwarmInfo(acc) + if err != nil { + acc.AddError(err) + } + } + // List containers opts := types.ContainerListOptions{} ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) @@ -187,6 +201,75 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { return nil } +func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { + + ctx, cancel := 
context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) + if err != nil { + return err + } + + if len(services) > 0 { + + tasks, err := d.client.TaskList(ctx, types.TaskListOptions{}) + if err != nil { + return err + } + + nodes, err := d.client.NodeList(ctx, types.NodeListOptions{}) + if err != nil { + return err + } + + running := map[string]int{} + tasksNoShutdown := map[string]int{} + + activeNodes := make(map[string]struct{}) + for _, n := range nodes { + if n.Status.State != swarm.NodeStateDown { + activeNodes[n.ID] = struct{}{} + } + } + + for _, task := range tasks { + if task.DesiredState != swarm.TaskStateShutdown { + tasksNoShutdown[task.ServiceID]++ + } + + if task.Status.State == swarm.TaskStateRunning { + running[task.ServiceID]++ + } + } + + for _, service := range services { + tags := map[string]string{} + fields := make(map[string]interface{}) + now := time.Now() + tags["service_id"] = service.ID + tags["service_name"] = service.Spec.Name + if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { + tags["service_mode"] = "replicated" + fields["tasks_running"] = running[service.ID] + fields["tasks_desired"] = *service.Spec.Mode.Replicated.Replicas + } else if service.Spec.Mode.Global != nil { + tags["service_mode"] = "global" + fields["tasks_running"] = running[service.ID] + fields["tasks_desired"] = tasksNoShutdown[service.ID] + } else { + log.Printf("E! 
Unknow Replicas Mode") + } + // Add metrics + acc.AddFields("docker_swarm", + fields, + tags, + now) + } + } + + return nil +} + func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { // Init vars dataFields := make(map[string]interface{}) diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index 95adfcf8e..b18274136 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -8,6 +8,7 @@ import ( "github.com/influxdata/telegraf/testutil" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" "github.com/stretchr/testify/require" ) @@ -16,6 +17,9 @@ type MockClient struct { ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) ContainerStatsF func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error) + ServiceListF func(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + TaskListF func(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) + NodeListF func(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) } func (c *MockClient) Info(ctx context.Context) (types.Info, error) { @@ -44,6 +48,27 @@ func (c *MockClient) ContainerInspect( return c.ContainerInspectF(ctx, containerID) } +func (c *MockClient) ServiceList( + ctx context.Context, + options types.ServiceListOptions, +) ([]swarm.Service, error) { + return c.ServiceListF(ctx, options) +} + +func (c *MockClient) TaskList( + ctx context.Context, + options types.TaskListOptions, +) ([]swarm.Task, error) { + return c.TaskListF(ctx, options) +} + +func (c *MockClient) NodeList( + ctx context.Context, + options types.NodeListOptions, +) ([]swarm.Node, error) { + return c.NodeListF(ctx, options) +} + func newClient(host string, tlsConfig *tls.Config) (Client, error) { 
return &MockClient{ InfoF: func(context.Context) (types.Info, error) { @@ -58,6 +83,15 @@ func newClient(host string, tlsConfig *tls.Config) (Client, error) { ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) { return containerInspect, nil }, + ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { + return ServiceList, nil + }, + TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) { + return TaskList, nil + }, + NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { + return NodeList, nil + }, }, nil } @@ -227,6 +261,15 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { return containerInspect, nil }, + ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { + return ServiceList, nil + }, + TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) { + return TaskList, nil + }, + NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { + return NodeList, nil + }, }, nil }, } @@ -436,3 +479,42 @@ func TestDockerGatherInfo(t *testing.T) { }, ) } + +func TestDockerGatherSwarmInfo(t *testing.T) { + var acc testutil.Accumulator + d := Docker{ + newClient: newClient, + } + + err := acc.GatherError(d.Gather) + require.NoError(t, err) + + d.gatherSwarmInfo(&acc) + + // test docker_container_net measurement + acc.AssertContainsTaggedFields(t, + "docker_swarm", + map[string]interface{}{ + "tasks_running": int(2), + "tasks_desired": uint64(2), + }, + map[string]string{ + "service_id": "qolkls9g5iasdiuihcyz9rnx2", + "service_name": "test1", + "service_mode": "replicated", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker_swarm", + map[string]interface{}{ + "tasks_running": int(1), + "tasks_desired": int(1), + }, + map[string]string{ + "service_id": "qolkls9g5iasdiuihcyz9rn3", + 
"service_name": "test2", + "service_mode": "global", + }, + ) +} diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index d16a3a728..929119fcb 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -8,6 +8,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" ) var info = types.Info{ @@ -133,6 +134,79 @@ var containerList = []types.Container{ }, } +var two = uint64(2) +var ServiceList = []swarm.Service{ + swarm.Service{ + ID: "qolkls9g5iasdiuihcyz9rnx2", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "test1", + }, + Mode: swarm.ServiceMode{ + Replicated: &swarm.ReplicatedService{ + Replicas: &two, + }, + }, + }, + }, + swarm.Service{ + ID: "qolkls9g5iasdiuihcyz9rn3", + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: "test2", + }, + Mode: swarm.ServiceMode{ + Global: &swarm.GlobalService{}, + }, + }, + }, +} + +var TaskList = []swarm.Task{ + swarm.Task{ + ID: "kwh0lv7hwwbh", + ServiceID: "qolkls9g5iasdiuihcyz9rnx2", + NodeID: "0cl4jturcyd1ks3fwpd010kor", + Status: swarm.TaskStatus{ + State: "running", + }, + DesiredState: "running", + }, + swarm.Task{ + ID: "u78m5ojbivc3", + ServiceID: "qolkls9g5iasdiuihcyz9rnx2", + NodeID: "0cl4jturcyd1ks3fwpd010kor", + Status: swarm.TaskStatus{ + State: "running", + }, + DesiredState: "running", + }, + swarm.Task{ + ID: "1n1uilkhr98l", + ServiceID: "qolkls9g5iasdiuihcyz9rn3", + NodeID: "0cl4jturcyd1ks3fwpd010kor", + Status: swarm.TaskStatus{ + State: "running", + }, + DesiredState: "running", + }, +} + +var NodeList = []swarm.Node{ + swarm.Node{ + ID: "0cl4jturcyd1ks3fwpd010kor", + Status: swarm.NodeStatus{ + State: "ready", + }, + }, + swarm.Node{ + ID: "0cl4jturcyd1ks3fwpd010kor", + Status: swarm.NodeStatus{ + State: "ready", + }, + }, +} + func containerStats() 
types.ContainerStats { var stat types.ContainerStats jsonStat := ` From 93ff8113585c1ed73fcab54ef247fc1fdc257162 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Oct 2017 14:37:02 -0700 Subject: [PATCH 09/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 464a31f68..d33fab200 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ - [#1692](https://github.com/influxdata/telegraf/pull/1692): Add support for the rollbar occurrence webhook event. - [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin. - [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input. +- [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin. ### Bugfixes From a163effa6dc9e70857066b63eacaa41024ab46ee Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Oct 2017 15:22:57 -0700 Subject: [PATCH 10/95] Add support for proxy environment variables to http_response (#3302) --- plugins/inputs/http_response/http_response.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 62d6e9ec1..3dd7f9ba5 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -98,6 +98,7 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) { } client := &http.Client{ Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, DisableKeepAlives: true, TLSClientConfig: tlsCfg, }, From 002ccf3295f1aeec17b45c6356ac63cee4a21581 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 3 Oct 2017 15:25:19 -0700 Subject: [PATCH 11/95] Update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d33fab200..19435b098 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -21,7 +21,6 @@ - [#3170](https://github.com/influxdata/telegraf/pull/3170): Add support for sharding based on metric name. - [#3196](https://github.com/influxdata/telegraf/pull/3196): Add Kafka output plugin topic_suffix option. - [#3027](https://github.com/influxdata/telegraf/pull/3027): Include mount mode option in disk metrics. -- [#3212](https://github.com/influxdata/telegraf/pull/3212): Add support for standard proxy env vars in outputs. - [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin. - [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs. - [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input. @@ -53,6 +52,8 @@ - [#2887](https://github.com/influxdata/telegraf/issues/2887): Allow JSON data format to contain zero metrics. - [#3284](https://github.com/influxdata/telegraf/issues/3284): Fix format of connection_timeout in mqtt_consumer. - [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input. +- [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response. +- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs. ## v1.4.1 [2017-09-26] From e69c3f9d1c0378cf4b67d4a4d3e23edf82e7cf2b Mon Sep 17 00:00:00 2001 From: Rickard von Essen Date: Thu, 5 Oct 2017 00:15:58 +0200 Subject: [PATCH 12/95] Add smart input plugin for collecting S.M.A.R.T. 
data (#2449) --- plugins/inputs/all/all.go | 1 + plugins/inputs/smart/README.md | 135 +++++++++ plugins/inputs/smart/smart.go | 339 +++++++++++++++++++++++ plugins/inputs/smart/smart_test.go | 426 +++++++++++++++++++++++++++++ 4 files changed, 901 insertions(+) create mode 100644 plugins/inputs/smart/README.md create mode 100644 plugins/inputs/smart/smart.go create mode 100644 plugins/inputs/smart/smart_test.go diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 45d040503..1f4d3825c 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -76,6 +76,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" + _ "github.com/influxdata/telegraf/plugins/inputs/smart" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md new file mode 100644 index 000000000..a81b344a5 --- /dev/null +++ b/plugins/inputs/smart/README.md @@ -0,0 +1,135 @@ +# Telegraf S.M.A.R.T. plugin + +Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs)[1] that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. +See smartmontools (https://www.smartmontools.org/). 
+ +If no devices are specified, the plugin will scan for SMART devices via the following command: + +``` +smartctl --scan +``` + +Metrics will be reported from the following `smartctl` command: + +``` +smartctl --info --attributes --health -n --format=brief +``` + +This plugin supports _smartmontools_ version 5.41 and above, but v. 5.41 and v. 5.42 +might require setting `nocheck`, see the comment in the sample configuration. + +To enable SMART on a storage device run: + +``` +smartctl -s on +``` + +## Measurements + +- smart_device: + + * Tags: + - `capacity` + - `device` + - `device_model` + - `enabled` + - `health` + - `serial_no` + - `wwn` + * Fields: + - `exit_status` + - `health_ok` + - `read_error_rate` + - `seek_error` + - `temp_c` + - `udma_crc_errors` + +- smart_attribute: + + * Tags: + - `device` + - `fail` + - `flags` + - `id` + - `name` + - `serial_no` + - `wwn` + * Fields: + - `exit_status` + - `raw_value` + - `threshold` + - `value` + - `worst` + +### Flags + +The interpretation of the tag `flags` is: + - *K* auto-keep + - *C* event count + - *R* error rate + - *S* speed/performance + - *O* updated online + - *P* prefailure warning + +### Exit Status + +The `exit_status` field captures the exit status of the smartctl command which +is defined by a bitmask. For the interpretation of the bitmask see the man page for +smartctl. + +### Device Names + +Device names, e.g., `/dev/sda`, are *not persistent*, and may be +subject to change across reboots or system changes. Instead, you can the +*World Wide Name* (WWN) or serial number to identify devices. On Linux block +devices can be referenced by the WWN in the following location: +`/dev/disk/by-id/`. + +## Configuration + +```toml +# Read metrics from storage devices supporting S.M.A.R.T. +[[inputs.smart]] + ## Optionally specify the path to the smartctl executable + # path = "/usr/bin/smartctl" + # + ## On most platforms smartctl requires root access. 
+ ## Setting 'use_sudo' to true will make use of sudo to run smartctl. + ## Sudo must be configured to to allow the telegraf user to run smartctl + ## with out password. + # use_sudo = false + # + ## Skip checking disks in this power mode. Defaults to + ## "standby" to not wake up disks that have stoped rotating. + ## See --nockeck in the man pages for smartctl. + ## smartctl version 5.41 and 5.42 have faulty detection of + ## power mode and might require changing this value to + ## "never" depending on your storage device. + # nocheck = "standby" + # + ## Gather detailed metrics for each SMART Attribute. + ## Defaults to "false" + ## + # attributes = false + # + ## Optionally specify devices to exclude from reporting. + # excludes = [ "/dev/pass6" ] + # + ## Optionally specify devices and device type, if unset + ## a scan (smartctl --scan) for S.M.A.R.T. devices will + ## done and all found will be included except for the + ## excluded in excludes. + # devices = [ "/dev/ada0 -d atacam" ] +``` + +To run `smartctl` with `sudo` create a wrapper script and use `path` in +the configuration to execute that. 
+ +## Output + +Example output from an _Apple SSD_: +``` +> smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=/dev/rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000 +> smart_attribute,device=/dev/rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000 +> smart_device,enabled=Enabled,host=mbpro.local,device=/dev/rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 +``` diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go new file mode 100644 index 000000000..a754d1ace --- /dev/null +++ b/plugins/inputs/smart/smart.go @@ -0,0 +1,339 @@ +package smart + +import ( + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var ( + execCommand = exec.Command // execCommand is used to mock commands in tests. 
+ + // Device Model: APPLE SSD SM256E + modelInInfo = regexp.MustCompile("^Device Model:\\s+(.*)$") + // Serial Number: S0X5NZBC422720 + serialInInfo = regexp.MustCompile("^Serial Number:\\s+(.*)$") + // LU WWN Device Id: 5 002538 655584d30 + wwnInInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$") + // User Capacity: 251,000,193,024 bytes [251 GB] + usercapacityInInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$") + // SMART support is: Enabled + smartEnabledInInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$") + // SMART overall-health self-assessment test result: PASSED + // PASSED, FAILED, UNKNOWN + smartOverallHealth = regexp.MustCompile("^SMART overall-health self-assessment test result:\\s+(\\w+).*$") + + // ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + // 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 + // 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 + // 192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716 + attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$") + + deviceFieldIds = map[string]string{ + "1": "read_error_rate", + "7": "seek_error_rate", + "194": "temp_c", + "199": "udma_crc_errors", + } +) + +type Smart struct { + Path string + Nocheck string + Attributes bool + Excludes []string + Devices []string + UseSudo bool +} + +var sampleConfig = ` + ## Optionally specify the path to the smartctl executable + # path = "/usr/bin/smartctl" + # + ## On most platforms smartctl requires root access. + ## Setting 'use_sudo' to true will make use of sudo to run smartctl. + ## Sudo must be configured to to allow the telegraf user to run smartctl + ## with out password. + # use_sudo = false + # + ## Skip checking disks in this power mode. Defaults to + ## "standby" to not wake up disks that have stoped rotating. + ## See --nocheck in the man pages for smartctl. 
+ ## smartctl version 5.41 and 5.42 have faulty detection of + ## power mode and might require changing this value to + ## "never" depending on your disks. + # nocheck = "standby" + # + ## Gather detailed metrics for each SMART Attribute. + ## Defaults to "false" + ## + # attributes = false + # + ## Optionally specify devices to exclude from reporting. + # excludes = [ "/dev/pass6" ] + # + ## Optionally specify devices and device type, if unset + ## a scan (smartctl --scan) for S.M.A.R.T. devices will + ## done and all found will be included except for the + ## excluded in excludes. + # devices = [ "/dev/ada0 -d atacam" ] +` + +func (m *Smart) SampleConfig() string { + return sampleConfig +} + +func (m *Smart) Description() string { + return "Read metrics from storage devices supporting S.M.A.R.T." +} + +func (m *Smart) Gather(acc telegraf.Accumulator) error { + if len(m.Path) == 0 { + return fmt.Errorf("smartctl not found: verify that smartctl is installed and that smartctl is in your PATH") + } + + devices := m.Devices + if len(devices) == 0 { + var err error + devices, err = m.scan() + if err != nil { + return err + } + } + + m.getAttributes(acc, devices) + return nil +} + +// Wrap with sudo +func sudo(sudo bool, command string, args ...string) *exec.Cmd { + if sudo { + return execCommand("sudo", append([]string{"-n", command}, args...)...) + } + + return execCommand(command, args...) +} + +// Scan for S.M.A.R.T. 
devices +func (m *Smart) scan() ([]string, error) { + + cmd := sudo(m.UseSudo, m.Path, "--scan") + out, err := internal.CombinedOutputTimeout(cmd, time.Second*5) + if err != nil { + return []string{}, fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + } + + devices := []string{} + for _, line := range strings.Split(string(out), "\n") { + dev := strings.Split(line, "#") + if len(dev) > 1 && !excludedDev(m.Excludes, strings.TrimSpace(dev[0])) { + devices = append(devices, strings.TrimSpace(dev[0])) + } + } + return devices, nil +} + +func excludedDev(excludes []string, deviceLine string) bool { + device := strings.Split(deviceLine, " ") + if len(device) != 0 { + for _, exclude := range excludes { + if device[0] == exclude { + return true + } + } + } + return false +} + +// Get info and attributes for each S.M.A.R.T. device +func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) { + + var wg sync.WaitGroup + wg.Add(len(devices)) + + for _, device := range devices { + go gatherDisk(acc, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg) + } + + wg.Wait() +} + +// Command line parse errors are denoted by the exit code having the 0 bit set. +// All other errors are drive/communication errors and should be ignored. +func exitStatus(err error) (int, error) { + if exiterr, ok := err.(*exec.ExitError); ok { + if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return status.ExitStatus(), nil + } + } + return 0, err +} + +func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, path, nockeck, device string, wg *sync.WaitGroup) { + + defer wg.Done() + // smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n + args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nockeck, "--format=brief"} + args = append(args, strings.Split(device, " ")...) + cmd := sudo(usesudo, path, args...) 
+ out, e := internal.CombinedOutputTimeout(cmd, time.Second*5) + outStr := string(out) + + // Ignore all exit statuses except if it is a command line parse error + exitStatus, er := exitStatus(e) + if er != nil { + acc.AddError(fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), e, outStr)) + return + } + + device_tags := map[string]string{} + device_tags["device"] = strings.Split(device, " ")[0] + device_fields := make(map[string]interface{}) + device_fields["exit_status"] = exitStatus + + for _, line := range strings.Split(outStr, "\n") { + + model := modelInInfo.FindStringSubmatch(line) + if len(model) > 1 { + device_tags["model"] = model[1] + } + + serial := serialInInfo.FindStringSubmatch(line) + if len(serial) > 1 { + device_tags["serial_no"] = serial[1] + } + + wwn := wwnInInfo.FindStringSubmatch(line) + if len(wwn) > 1 { + device_tags["wwn"] = strings.Replace(wwn[1], " ", "", -1) + } + + capacity := usercapacityInInfo.FindStringSubmatch(line) + if len(capacity) > 1 { + device_tags["capacity"] = strings.Replace(capacity[1], ",", "", -1) + } + + enabled := smartEnabledInInfo.FindStringSubmatch(line) + if len(enabled) > 1 { + device_tags["enabled"] = enabled[1] + } + + health := smartOverallHealth.FindStringSubmatch(line) + if len(health) > 1 { + device_fields["health_ok"] = (health[1] == "PASSED") + } + + attr := attribute.FindStringSubmatch(line) + + if len(attr) > 1 { + + if attributes { + tags := map[string]string{} + fields := make(map[string]interface{}) + + tags["device"] = strings.Split(device, " ")[0] + + if serial, ok := device_tags["serial_no"]; ok { + tags["serial_no"] = serial + } + if wwn, ok := device_tags["wwn"]; ok { + tags["wwn"] = wwn + } + tags["id"] = attr[1] + tags["name"] = attr[2] + tags["flags"] = attr[3] + + fields["exit_status"] = exitStatus + if i, err := strconv.ParseInt(attr[4], 10, 64); err == nil { + fields["value"] = i + } + if i, err := strconv.ParseInt(attr[5], 10, 64); err == nil { + fields["worst"] 
= i + } + if i, err := strconv.ParseInt(attr[6], 10, 64); err == nil { + fields["threshold"] = i + } + + tags["fail"] = attr[7] + if val, err := parseRawValue(attr[8]); err == nil { + fields["raw_value"] = val + } + + acc.AddFields("smart_attribute", fields, tags) + } + + // If the attribute matches on the one in deviceFieldIds + // save the raw value to a field. + if field, ok := deviceFieldIds[attr[1]]; ok { + if val, err := parseRawValue(attr[8]); err == nil { + device_fields[field] = val + } + } + } + } + acc.AddFields("smart_device", device_fields, device_tags) +} + +func parseRawValue(rawVal string) (int64, error) { + + // Integer + if i, err := strconv.ParseInt(rawVal, 10, 64); err == nil { + return i, nil + } + + // Duration: 65h+33m+09.259s + unit := regexp.MustCompile("^(.*)([hms])$") + parts := strings.Split(rawVal, "+") + if len(parts) == 0 { + return 0, fmt.Errorf("Couldn't parse RAW_VALUE '%s'", rawVal) + } + + duration := int64(0) + for _, part := range parts { + timePart := unit.FindStringSubmatch(part) + if len(timePart) == 0 { + continue + } + switch timePart[2] { + case "h": + duration += parseInt(timePart[1]) * int64(3600) + case "m": + duration += parseInt(timePart[1]) * int64(60) + case "s": + // drop fractions of seconds + duration += parseInt(strings.Split(timePart[1], ".")[0]) + default: + // Unknown, ignore + } + } + return duration, nil +} + +func parseInt(str string) int64 { + if i, err := strconv.ParseInt(str, 10, 64); err == nil { + return i + } + return 0 +} + +func init() { + m := Smart{} + path, _ := exec.LookPath("smartctl") + if len(path) > 0 { + m.Path = path + } + m.Nocheck = "standby" + + inputs.Add("smart", func() telegraf.Input { + return &m + }) +} diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go new file mode 100644 index 000000000..c8e777033 --- /dev/null +++ b/plugins/inputs/smart/smart_test.go @@ -0,0 +1,426 @@ +package smart + +import ( + "fmt" + "os" + "os/exec" + "testing" + + 
"github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device +` + mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +CHECK POWER MODE not implemented, ignoring -n option +=== START OF INFORMATION SECTION === +Model Family: Apple SD/SM/TS...E/F SSDs +Device Model: APPLE SSD SM256E +Serial Number: S0X5NZBC422720 +LU WWN Device Id: 5 002538 043584d30 +Firmware Version: CXM09A1Q +User Capacity: 251,000,193,024 bytes [251 GB] +Sector Sizes: 512 bytes logical, 4096 bytes physical +Rotation Rate: Solid State Device +Device is: In smartctl database [for details use: -P show] +ATA Version is: ATA8-ACS T13/1699-D revision 4c +SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Thu Feb 9 16:48:45 2017 CET +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +=== START OF READ SMART DATA SECTION === +SMART Attributes Data Structure revision number: 1 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 + 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 + 9 Power_On_Hours -O--CK 099 099 000 - 2988 + 12 Power_Cycle_Count -O--CK 085 085 000 - 14879 +169 Unknown_Attribute PO--C- 253 253 010 - 2044932921600 +173 Wear_Leveling_Count -O--CK 185 185 100 - 957808640337 +190 Airflow_Temperature_Cel -O---K 055 040 045 Past 45 (Min/Max 43/57 #2689) +192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716 +194 Temperature_Celsius -O---K 066 021 000 - 34 (Min/Max 14/79) +197 Current_Pending_Sector -O---K 100 100 000 - 0 +199 UDMA_CRC_Error_Count -O-RC- 200 200 000 - 0 +240 Head_Flying_Hours ------ 100 253 000 - 6585h+55m+23.234s + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` +) + +func TestGatherAttributes(t *testing.T) { + s := &Smart{ + Path: "smartctl", + Attributes: true, + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + var acc testutil.Accumulator + + err := s.Gather(&acc) + + require.NoError(t, err) + assert.Equal(t, 65, acc.NFields(), "Wrong number of fields gathered") + + var testsAda0Attributes = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "value": int64(200), + "worst": int64(200), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "1", + "name": "Raw_Read_Error_Rate", + "flags": "-O-RC-", + "fail": "-", + }, + }, + { + map[string]interface{}{ 
+ "value": int64(100), + "worst": int64(100), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "5", + "name": "Reallocated_Sector_Ct", + "flags": "PO--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(99), + "worst": int64(99), + "threshold": int64(0), + "raw_value": int64(2988), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "9", + "name": "Power_On_Hours", + "flags": "-O--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(85), + "worst": int64(85), + "threshold": int64(0), + "raw_value": int64(14879), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "12", + "name": "Power_Cycle_Count", + "flags": "-O--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(253), + "worst": int64(253), + "threshold": int64(10), + "raw_value": int64(2044932921600), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "169", + "name": "Unknown_Attribute", + "flags": "PO--C-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(185), + "worst": int64(185), + "threshold": int64(100), + "raw_value": int64(957808640337), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "173", + "name": "Wear_Leveling_Count", + "flags": "-O--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(55), + "worst": int64(40), + "threshold": int64(45), + "raw_value": int64(45), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": 
"S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "190", + "name": "Airflow_Temperature_Cel", + "flags": "-O---K", + "fail": "Past", + }, + }, + { + map[string]interface{}{ + "value": int64(97), + "worst": int64(97), + "threshold": int64(0), + "raw_value": int64(14716), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "192", + "name": "Power-Off_Retract_Count", + "flags": "-O--C-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(66), + "worst": int64(21), + "threshold": int64(0), + "raw_value": int64(34), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "194", + "name": "Temperature_Celsius", + "flags": "-O---K", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(100), + "worst": int64(100), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "197", + "name": "Current_Pending_Sector", + "flags": "-O---K", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(200), + "worst": int64(200), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "199", + "name": "UDMA_CRC_Error_Count", + "flags": "-O-RC-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(100), + "worst": int64(253), + "threshold": int64(0), + "raw_value": int64(23709323), + "exit_status": int(0), + }, + map[string]string{ + "device": "/dev/ada0", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "id": "240", + "name": "Head_Flying_Hours", + "flags": "------", + "fail": "-", + }, + }, + } + + for _, test := range 
testsAda0Attributes { + acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags) + } + + // tags = map[string]string{} + + var testsAda0Device = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "exit_status": int(0), + "health_ok": bool(true), + "read_error_rate": int64(0), + "temp_c": int64(34), + "udma_crc_errors": int64(0), + }, + map[string]string{ + "device": "/dev/ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + }, + }, + } + + for _, test := range testsAda0Device { + acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) + } + +} + +func TestGatherNoAttributes(t *testing.T) { + s := &Smart{ + Path: "smartctl", + Attributes: false, + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommand + var acc testutil.Accumulator + + err := s.Gather(&acc) + + require.NoError(t, err) + assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered") + acc.AssertDoesNotContainMeasurement(t, "smart_attribute") + + // tags = map[string]string{} + + var testsAda0Device = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "exit_status": int(0), + "health_ok": bool(true), + "read_error_rate": int64(0), + "temp_c": int64(34), + "udma_crc_errors": int64(0), + }, + map[string]string{ + "device": "/dev/ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + }, + }, + } + + for _, test := range testsAda0Device { + acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) + } + +} + +func TestExcludedDev(t *testing.T) { + assert.Equal(t, true, excludedDev([]string{"/dev/pass6"}, "/dev/pass6 -d atacam"), "Should be excluded.") + assert.Equal(t, false, excludedDev([]string{}, "/dev/pass6 
-d atacam"), "Shouldn't be excluded.") + assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.") + +} + +// fackeExecCommand is a helper function that mock +// the exec.Command call (and call the test binary) +func fakeExecCommand(command string, args ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--", command} + cs = append(cs, args...) + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} + return cmd +} + +// TestHelperProcess isn't a real test. It's used to mock exec.Command +// For example, if you run: +// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- --scan +// it returns below mockScanData. +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + args := os.Args + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + cmd, arg1, args := args[3], args[4], args[5:] + + if cmd == "smartctl" { + if arg1 == "--scan" { + fmt.Fprint(os.Stdout, mockScanData) + } + if arg1 == "--info" { + fmt.Fprint(os.Stdout, mockInfoAttributeData) + } + } else { + fmt.Fprint(os.Stdout, "command not found") + os.Exit(1) + } + os.Exit(0) +} From a4ea4c7a2560d125654f4aad7487ce6ed238fe14 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Oct 2017 15:18:15 -0700 Subject: [PATCH 13/95] Add smart to changelog and readme --- CHANGELOG.md | 6 ++++-- README.md | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19435b098..a2d176959 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,10 @@ ## v1.5 [unreleased] ### New Plugins -- [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah - [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei -- [wavefront](./plugins/inputswavefront/README.md) - Thanks to @puckpuck +- 
[nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah +- [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen +- [wavefront](./plugins/inputs/wavefront/README.md) - Thanks to @puckpuck ### Release Notes @@ -37,6 +38,7 @@ - [#3160](https://github.com/influxdata/telegraf/pull/3160): Add Wavefront output plugin. - [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input. - [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin. +- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data. ### Bugfixes diff --git a/README.md b/README.md index 9e7ad03f9..a3887e566 100644 --- a/README.md +++ b/README.md @@ -193,6 +193,7 @@ configuration options. * [riak](./plugins/inputs/riak) * [salesforce](./plugins/inputs/salesforce) * [sensors](./plugins/inputs/sensors) +* [smart](./plugins/inputs/smart) * [snmp](./plugins/inputs/snmp) * [snmp_legacy](./plugins/inputs/snmp_legacy) * [sql server](./plugins/inputs/sqlserver) (microsoft) From 0bb32570ba3c786b5932522a7564e9977cd3acc7 Mon Sep 17 00:00:00 2001 From: Christian Meilke Date: Thu, 5 Oct 2017 00:29:32 +0200 Subject: [PATCH 14/95] Add cluster health level configuration to elasticsearch input (#3269) --- plugins/inputs/elasticsearch/README.md | 10 +++- plugins/inputs/elasticsearch/elasticsearch.go | 24 ++++++--- .../elasticsearch/elasticsearch_test.go | 54 ++++++++++++++++++- plugins/inputs/elasticsearch/testdata_test.go | 15 ++++++ 4 files changed, 92 insertions(+), 11 deletions(-) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index 9cf9b9b09..f3999dc30 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -23,8 +23,14 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre ## Set 
cluster_health to true when you want to also obtain cluster health stats cluster_health = false - ## Set cluster_stats to true when you want to obtain cluster stats from the - ## Master node. + ## Adjust cluster_health_level when you want to also obtain detailed health stats + ## The options are + ## - indices (default) + ## - cluster + # cluster_health_level = "indices" + + ## Set cluster_stats to true when you want to also obtain cluster stats from the + ## Master node. cluster_stats = false ## Optional SSL Config diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 5bf3d1c63..589d0fe3e 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -3,17 +3,16 @@ package elasticsearch import ( "encoding/json" "fmt" - "net/http" - "regexp" - "sync" - "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" "io/ioutil" + "net/http" + "regexp" "strings" + "sync" + "time" ) // mask for masking username/password from error messages @@ -94,6 +93,12 @@ const sampleConfig = ` ## Set cluster_health to true when you want to also obtain cluster health stats cluster_health = false + ## Adjust cluster_health_level when you want to also obtain detailed health stats + ## The options are + ## - indices (default) + ## - cluster + # cluster_health_level = "indices" + ## Set cluster_stats to true when you want to also obtain cluster stats from the ## Master node. 
cluster_stats = false @@ -113,6 +118,7 @@ type Elasticsearch struct { Servers []string HttpTimeout internal.Duration ClusterHealth bool + ClusterHealthLevel string ClusterStats bool SSLCA string `toml:"ssl_ca"` // Path to CA file SSLCert string `toml:"ssl_cert"` // Path to host cert file @@ -126,7 +132,8 @@ type Elasticsearch struct { // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { return &Elasticsearch{ - HttpTimeout: internal.Duration{Duration: time.Second * 5}, + HttpTimeout: internal.Duration{Duration: time.Second * 5}, + ClusterHealthLevel: "indices", } } @@ -182,7 +189,10 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { } if e.ClusterHealth { - url = s + "/_cluster/health?level=indices" + url = s + "/_cluster/health" + if e.ClusterHealthLevel != "" { + url = url + "?level=" + e.ClusterHealthLevel + } if err := e.gatherClusterHealth(url, acc); err != nil { acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) return diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index f057cfd8b..41b578c79 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -93,10 +93,11 @@ func TestGatherNodeStats(t *testing.T) { checkNodeStatsResult(t, &acc) } -func TestGatherClusterHealth(t *testing.T) { +func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.ClusterHealth = true + es.ClusterHealthLevel = "" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) var acc testutil.Accumulator @@ -104,6 +105,56 @@ func TestGatherClusterHealth(t *testing.T) { checkIsMaster(es, false, t) + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", + clusterHealthExpected, + map[string]string{"name": "elasticsearch_telegraf"}) + + 
acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + v1IndexExpected, + map[string]string{"index": "v1"}) + + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + v2IndexExpected, + map[string]string{"index": "v2"}) +} + +func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { + es := newElasticsearchWithClient() + es.Servers = []string{"http://example.com:9200"} + es.ClusterHealth = true + es.ClusterHealthLevel = "cluster" + es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + + var acc testutil.Accumulator + require.NoError(t, es.gatherClusterHealth("junk", &acc)) + + checkIsMaster(es, false, t) + + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", + clusterHealthExpected, + map[string]string{"name": "elasticsearch_telegraf"}) + + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + v1IndexExpected, + map[string]string{"index": "v1"}) + + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + v2IndexExpected, + map[string]string{"index": "v2"}) +} + +func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { + es := newElasticsearchWithClient() + es.Servers = []string{"http://example.com:9200"} + es.ClusterHealth = true + es.ClusterHealthLevel = "indices" + es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices) + + var acc testutil.Accumulator + require.NoError(t, es.gatherClusterHealth("junk", &acc)) + + checkIsMaster(es, false, t) + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) @@ -185,7 +236,6 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { // ensure flag is clear so Cluster Stats would not be done checkIsMaster(es, false, t) checkNodeStatsResult(t, &acc) - } func newElasticsearchWithClient() *Elasticsearch { diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go 
index 19ebb3bfb..d43a3f9c5 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -1,6 +1,21 @@ package elasticsearch const clusterHealthResponse = ` +{ + "cluster_name": "elasticsearch_telegraf", + "status": "green", + "timed_out": false, + "number_of_nodes": 3, + "number_of_data_nodes": 3, + "active_primary_shards": 5, + "active_shards": 15, + "relocating_shards": 0, + "initializing_shards": 0, + "unassigned_shards": 0 +} +` + +const clusterHealthResponseWithIndices = ` { "cluster_name": "elasticsearch_telegraf", "status": "green", From b9f319529f89c3554d177d80fa7cf996de12a25b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 4 Oct 2017 15:30:11 -0700 Subject: [PATCH 15/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a2d176959..e52e5aedf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ - [#3281](https://github.com/influxdata/telegraf/pull/3281): Add extra wired tiger cache metrics to mongodb input. - [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin. - [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data. +- [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input. 
### Bugfixes From 4fab572b6bec4326d03d13c38095f89a44c02b53 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Oct 2017 12:12:14 -0700 Subject: [PATCH 16/95] Release buffer back to pool earlier --- plugins/inputs/statsd/statsd.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 67ce29cd8..db412b549 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -427,13 +427,13 @@ func (s *Statsd) parser() error { return nil case buf := <-s.in: lines := strings.Split(buf.String(), "\n") + s.bufPool.Put(buf) for _, line := range lines { line = strings.TrimSpace(line) if line != "" { s.parseStatsdLine(line) } } - s.bufPool.Put(buf) } } } From f56dda0ac8630bd510cded806cc90ec1bbfb1740 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Oct 2017 16:02:21 -0700 Subject: [PATCH 17/95] Fix panic in cpu input if number of cpus changes (#3306) --- plugins/inputs/system/cpu.go | 16 +++++++++----- plugins/inputs/system/cpu_test.go | 35 +++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index f60cb20c9..55378c93e 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -11,7 +11,7 @@ import ( type CPUStats struct { ps PS - lastStats []cpu.TimesStat + lastStats map[string]cpu.TimesStat PerCPU bool `toml:"percpu"` TotalCPU bool `toml:"totalcpu"` @@ -53,7 +53,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { } now := time.Now() - for i, cts := range times { + for _, cts := range times { tags := map[string]string{ "cpu": cts.CPU, } @@ -86,13 +86,16 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { // If it's the 1st gather, can't get CPU Usage stats yet continue } - lastCts := s.lastStats[i] + + lastCts, ok := s.lastStats[cts.CPU] + if !ok { + continue + } lastTotal := totalCpuTime(lastCts) lastActive := 
activeCpuTime(lastCts) totalDelta := total - lastTotal if totalDelta < 0 { - s.lastStats = times return fmt.Errorf("Error: current total CPU time is less than previous total CPU time") } @@ -118,7 +121,10 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { acc.AddGauge("cpu", fieldsG, tags, now) } - s.lastStats = times + s.lastStats = make(map[string]cpu.TimesStat) + for _, cts := range times { + s.lastStats[cts.CPU] = cts + } return nil } diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/system/cpu_test.go index e9dc7dda9..fabff8a7d 100644 --- a/plugins/inputs/system/cpu_test.go +++ b/plugins/inputs/system/cpu_test.go @@ -149,3 +149,38 @@ func assertContainsTaggedFloat( measurement, delta, expectedValue, actualValue) assert.Fail(t, msg) } + +// TestCPUCountChange tests that no errors are encountered if the number of +// CPUs increases as reported with LXC. +func TestCPUCountIncrease(t *testing.T) { + var mps MockPS + var mps2 MockPS + var acc testutil.Accumulator + var err error + + cs := NewCPUStats(&mps) + + mps.On("CPUTimes").Return( + []cpu.TimesStat{ + cpu.TimesStat{ + CPU: "cpu0", + }, + }, nil) + + err = cs.Gather(&acc) + require.NoError(t, err) + + mps2.On("CPUTimes").Return( + []cpu.TimesStat{ + cpu.TimesStat{ + CPU: "cpu0", + }, + cpu.TimesStat{ + CPU: "cpu1", + }, + }, nil) + cs.ps = &mps2 + + err = cs.Gather(&acc) + require.NoError(t, err) +} From 6e1fa559a35858f1130c2a4a3352616209fb58c3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Oct 2017 16:05:51 -0700 Subject: [PATCH 18/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e52e5aedf..1c92a2132 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ - [#3081](https://github.com/influxdata/telegraf/issues/3081): Fix case sensitivity error in sqlserver input. - [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response. 
- [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs. +- [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes. ## v1.4.1 [2017-09-26] From cce40c515afc2e9a03c073dc627cb02f88da80db Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Oct 2017 16:14:21 -0700 Subject: [PATCH 19/95] Use chunked transfer encoding in InfluxDB output (#3307) --- plugins/outputs/influxdb/client/client.go | 8 +- plugins/outputs/influxdb/client/http.go | 58 +------------- plugins/outputs/influxdb/client/http_test.go | 82 +------------------- plugins/outputs/influxdb/client/udp.go | 27 ++----- plugins/outputs/influxdb/client/udp_test.go | 50 ++---------- plugins/outputs/influxdb/influxdb.go | 8 +- 6 files changed, 19 insertions(+), 214 deletions(-) diff --git a/plugins/outputs/influxdb/client/client.go b/plugins/outputs/influxdb/client/client.go index 3f52752ad..4bcaceb74 100644 --- a/plugins/outputs/influxdb/client/client.go +++ b/plugins/outputs/influxdb/client/client.go @@ -4,13 +4,7 @@ import "io" type Client interface { Query(command string) error - - Write(b []byte) (int, error) - WriteWithParams(b []byte, params WriteParams) (int, error) - - WriteStream(b io.Reader, contentLength int) (int, error) - WriteStreamWithParams(b io.Reader, contentLength int, params WriteParams) (int, error) - + WriteStream(b io.Reader) error Close() error } diff --git a/plugins/outputs/influxdb/client/http.go b/plugins/outputs/influxdb/client/http.go index 8f0a6ac24..4dea82a8d 100644 --- a/plugins/outputs/influxdb/client/http.go +++ b/plugins/outputs/influxdb/client/http.go @@ -137,60 +137,13 @@ func (c *httpClient) Query(command string) error { return c.doRequest(req, http.StatusOK) } -func (c *httpClient) Write(b []byte) (int, error) { - req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), c.writeURL) +func (c *httpClient) WriteStream(r io.Reader) error { + req, err := 
c.makeWriteRequest(r, c.writeURL) if err != nil { - return 0, nil + return err } - err = c.doRequest(req, http.StatusNoContent) - if err == nil { - return len(b), nil - } - return 0, err -} - -func (c *httpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) { - req, err := c.makeWriteRequest(bytes.NewReader(b), len(b), writeURL(c.url, wp)) - if err != nil { - return 0, nil - } - - err = c.doRequest(req, http.StatusNoContent) - if err == nil { - return len(b), nil - } - return 0, err -} - -func (c *httpClient) WriteStream(r io.Reader, contentLength int) (int, error) { - req, err := c.makeWriteRequest(r, contentLength, c.writeURL) - if err != nil { - return 0, nil - } - - err = c.doRequest(req, http.StatusNoContent) - if err == nil { - return contentLength, nil - } - return 0, err -} - -func (c *httpClient) WriteStreamWithParams( - r io.Reader, - contentLength int, - wp WriteParams, -) (int, error) { - req, err := c.makeWriteRequest(r, contentLength, writeURL(c.url, wp)) - if err != nil { - return 0, nil - } - - err = c.doRequest(req, http.StatusNoContent) - if err == nil { - return contentLength, nil - } - return 0, err + return c.doRequest(req, http.StatusNoContent) } func (c *httpClient) doRequest( @@ -232,7 +185,6 @@ func (c *httpClient) doRequest( func (c *httpClient) makeWriteRequest( body io.Reader, - contentLength int, writeURL string, ) (*http.Request, error) { req, err := c.makeRequest(writeURL, body) @@ -241,8 +193,6 @@ func (c *httpClient) makeWriteRequest( } if c.config.ContentEncoding == "gzip" { req.Header.Set("Content-Encoding", "gzip") - } else { - req.Header.Set("Content-Length", fmt.Sprint(contentLength)) } return req, nil } diff --git a/plugins/outputs/influxdb/client/http_test.go b/plugins/outputs/influxdb/client/http_test.go index ba08918af..2cb0182e8 100644 --- a/plugins/outputs/influxdb/client/http_test.go +++ b/plugins/outputs/influxdb/client/http_test.go @@ -110,66 +110,8 @@ func TestHTTPClient_Write(t *testing.T) { client, err := 
NewHTTP(config, wp) defer client.Close() assert.NoError(t, err) - n, err := client.Write([]byte("cpu value=99\n")) - assert.Equal(t, 13, n) - assert.NoError(t, err) - _, err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")), 13) - assert.NoError(t, err) -} - -func TestHTTPClient_WriteParamsOverride(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/write": - // test that database is set properly - if r.FormValue("db") != "override" { - w.WriteHeader(http.StatusTeapot) - w.Header().Set("Content-Type", "application/json") - fmt.Fprintln(w, `{"results":[{}],"error":"wrong db name"}`) - } - - // Validate the request body: - buf := make([]byte, 100) - n, _ := r.Body.Read(buf) - expected := "cpu value=99" - got := string(buf[0 : n-1]) - if expected != got { - w.WriteHeader(http.StatusTeapot) - w.Header().Set("Content-Type", "application/json") - msg := fmt.Sprintf(`{"results":[{}],"error":"expected [%s], got [%s]"}`, expected, got) - fmt.Fprintln(w, msg) - } - - w.WriteHeader(http.StatusNoContent) - w.Header().Set("Content-Type", "application/json") - case "/query": - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - fmt.Fprintln(w, `{"results":[{}]}`) - } - })) - defer ts.Close() - - config := HTTPConfig{ - URL: ts.URL, - } - defaultWP := WriteParams{ - Database: "test", - } - client, err := NewHTTP(config, defaultWP) - defer client.Close() - assert.NoError(t, err) - - // test that WriteWithParams overrides the default write params - wp := WriteParams{ - Database: "override", - } - n, err := client.WriteWithParams([]byte("cpu value=99\n"), wp) - assert.Equal(t, 13, n) - assert.NoError(t, err) - - _, err = client.WriteStreamWithParams(bytes.NewReader([]byte("cpu value=99\n")), 13, wp) + err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n"))) assert.NoError(t, err) } @@ -197,23 +139,7 @@ func TestHTTPClient_Write_Errors(t 
*testing.T) { assert.NoError(t, err) lp := []byte("cpu value=99\n") - n, err := client.Write(lp) - assert.Equal(t, 0, n) - assert.Error(t, err) - - n, err = client.WriteStream(bytes.NewReader(lp), 13) - assert.Equal(t, 0, n) - assert.Error(t, err) - - wp := WriteParams{ - Database: "override", - } - n, err = client.WriteWithParams(lp, wp) - assert.Equal(t, 0, n) - assert.Error(t, err) - - n, err = client.WriteStreamWithParams(bytes.NewReader(lp), 13, wp) - assert.Equal(t, 0, n) + err = client.WriteStream(bytes.NewReader(lp)) assert.Error(t, err) } @@ -404,8 +330,6 @@ func TestHTTPClient_PathPrefix(t *testing.T) { assert.NoError(t, err) err = client.Query("CREATE DATABASE test") assert.NoError(t, err) - _, err = client.Write([]byte("cpu value=99\n")) - assert.NoError(t, err) - _, err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n")), 13) + err = client.WriteStream(bytes.NewReader([]byte("cpu value=99\n"))) assert.NoError(t, err) } diff --git a/plugins/outputs/influxdb/client/udp.go b/plugins/outputs/influxdb/client/udp.go index 1dd4d9936..786b047fa 100644 --- a/plugins/outputs/influxdb/client/udp.go +++ b/plugins/outputs/influxdb/client/udp.go @@ -1,7 +1,6 @@ package client import ( - "bytes" "fmt" "io" "log" @@ -62,18 +61,8 @@ func (c *udpClient) Query(command string) error { return nil } -// Write will send the byte stream to the given UDP client endpoint -func (c *udpClient) Write(b []byte) (int, error) { - return c.WriteStream(bytes.NewReader(b), -1) -} - -// WriteWithParams are ignored by the UDP client, will forward to WriteStream -func (c *udpClient) WriteWithParams(b []byte, wp WriteParams) (int, error) { - return c.WriteStream(bytes.NewReader(b), -1) -} - // WriteStream will send the provided data through to the client, contentLength is ignored by the UDP client -func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) { +func (c *udpClient) WriteStream(r io.Reader) error { var totaln int for { nR, err := r.Read(c.buffer) @@ 
-81,14 +70,14 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) { break } if err != io.EOF && err != nil { - return totaln, err + return err } if c.buffer[nR-1] == uint8('\n') { nW, err := c.conn.Write(c.buffer[0:nR]) totaln += nW if err != nil { - return totaln, err + return err } } else { log.Printf("E! Could not fit point into UDP payload; dropping") @@ -99,7 +88,7 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) { break } if err != io.EOF && err != nil { - return totaln, err + return err } if c.buffer[nR-1] == uint8('\n') { break @@ -107,13 +96,7 @@ func (c *udpClient) WriteStream(r io.Reader, contentLength int) (int, error) { } } } - return totaln, nil -} - -// WriteStreamWithParams will forward the stream to the client backend, contentLength is ignored by the UDP client -// write params are ignored by the UDP client -func (c *udpClient) WriteStreamWithParams(r io.Reader, contentLength int, wp WriteParams) (int, error) { - return c.WriteStream(r, -1) + return nil } // Close will terminate the provided client connection diff --git a/plugins/outputs/influxdb/client/udp_test.go b/plugins/outputs/influxdb/client/udp_test.go index 9308144b5..545f142f5 100644 --- a/plugins/outputs/influxdb/client/udp_test.go +++ b/plugins/outputs/influxdb/client/udp_test.go @@ -9,7 +9,6 @@ import ( "github.com/influxdata/telegraf/metric" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestUDPClient(t *testing.T) { @@ -65,43 +64,6 @@ func TestUDPClient_Write(t *testing.T) { } }() - // test sending simple metric - n, err := client.Write([]byte("cpu value=99\n")) - assert.Equal(t, n, 13) - assert.NoError(t, err) - pkt := <-packets - assert.Equal(t, "cpu value=99\n", pkt) - - wp := WriteParams{} - // - // Using WriteStream() & a metric.Reader: - config3 := UDPConfig{ - URL: "udp://localhost:8199", - PayloadSize: 40, - } - client3, err := NewUDP(config3) - assert.NoError(t, err) - - now := 
time.Unix(1484142942, 0) - m1, _ := metric.New("test", map[string]string{}, - map[string]interface{}{"value": 1.1}, now) - m2, _ := metric.New("test", map[string]string{}, - map[string]interface{}{"value": 1.1}, now) - m3, _ := metric.New("test", map[string]string{}, - map[string]interface{}{"value": 1.1}, now) - ms := []telegraf.Metric{m1, m2, m3} - mReader := metric.NewReader(ms) - n, err = client3.WriteStreamWithParams(mReader, 10, wp) - // 3 metrics at 35 bytes each (including the newline) - assert.Equal(t, 105, n) - assert.NoError(t, err) - pkt = <-packets - assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt) - pkt = <-packets - assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt) - pkt = <-packets - assert.Equal(t, "test value=1.1 1484142942000000000\n", pkt) - assert.NoError(t, client.Close()) config = UDPConfig{ @@ -112,17 +74,15 @@ func TestUDPClient_Write(t *testing.T) { assert.NoError(t, err) ts := time.Unix(1484142943, 0) - m1, _ = metric.New("test", map[string]string{}, + m1, _ := metric.New("test", map[string]string{}, map[string]interface{}{"this_is_a_very_long_field_name": 1.1}, ts) - m2, _ = metric.New("test", map[string]string{}, + m2, _ := metric.New("test", map[string]string{}, map[string]interface{}{"value": 1.1}, ts) - ms = []telegraf.Metric{m1, m2} + ms := []telegraf.Metric{m1, m2} reader := metric.NewReader(ms) - n, err = client4.WriteStream(reader, 0) + err = client4.WriteStream(reader) assert.NoError(t, err) - require.Equal(t, 35, n) - assert.NoError(t, err) - pkt = <-packets + pkt := <-packets assert.Equal(t, "test value=1.1 1484142943000000000\n", pkt) assert.NoError(t, client4.Close()) diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index ac5faf455..71320d01e 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -183,12 +183,6 @@ func (i *InfluxDB) Description() string { // Write will choose a random server in the cluster to write to 
until a successful write // occurs, logging each unsuccessful. If all servers fail, return error. func (i *InfluxDB) Write(metrics []telegraf.Metric) error { - - bufsize := 0 - for _, m := range metrics { - bufsize += m.Len() - } - r := metric.NewReader(metrics) // This will get set to nil if a successful write occurs @@ -196,7 +190,7 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { p := rand.Perm(len(i.clients)) for _, n := range p { - if _, e := i.clients[n].WriteStream(r, bufsize); e != nil { + if e := i.clients[n].WriteStream(r); e != nil { // If the database was not found, try to recreate it: if strings.Contains(e.Error(), "database not found") { errc := i.clients[n].Query(fmt.Sprintf(`CREATE DATABASE "%s"`, qiReplacer.Replace(i.Database))) From 13c7802b8461caa1624a85a2bdd6f7fcb86bd688 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Oct 2017 16:15:43 -0700 Subject: [PATCH 20/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c92a2132..78bb8d0c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ - [#3297](https://github.com/influxdata/telegraf/issues/3297): Add support for proxy environment variables to http_response. - [#1588](https://github.com/influxdata/telegraf/issues/1588): Add support for standard proxy env vars in outputs. - [#3282](https://github.com/influxdata/telegraf/issues/3282): Fix panic in cpu input if number of cpus changes. +- [#2854](https://github.com/influxdata/telegraf/issues/2854): Use chunked transfer encoding in InfluxDB output. 
## v1.4.1 [2017-09-26] From 59bb31e7651e14fd7694b678a3a8cbd169788d5a Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 5 Oct 2017 16:19:53 -0700 Subject: [PATCH 21/95] Use golang 1.9.1 --- circle.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/circle.yml b/circle.yml index 117d03ad3..314903e9d 100644 --- a/circle.yml +++ b/circle.yml @@ -6,8 +6,8 @@ machine: - rabbitmq-server post: - sudo rm -rf /usr/local/go - - wget https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz - - sudo tar -C /usr/local -xzf go1.9.linux-amd64.tar.gz + - wget https://storage.googleapis.com/golang/go1.9.1.linux-amd64.tar.gz + - sudo tar -C /usr/local -xzf go1.9.1.linux-amd64.tar.gz - go version dependencies: From 75567d5b517df37b3e770dac271a1395bdcc3d50 Mon Sep 17 00:00:00 2001 From: Christian Meilke Date: Sat, 7 Oct 2017 01:16:32 +0200 Subject: [PATCH 22/95] Add ability to limit node stats in elasticsearch input (#3304) --- plugins/inputs/elasticsearch/README.md | 5 + plugins/inputs/elasticsearch/elasticsearch.go | 34 +++++-- .../elasticsearch/elasticsearch_test.go | 45 +++++++-- plugins/inputs/elasticsearch/testdata_test.go | 94 +++++++++++++++++++ 4 files changed, 164 insertions(+), 14 deletions(-) diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index f3999dc30..5698cc7f0 100644 --- a/plugins/inputs/elasticsearch/README.md +++ b/plugins/inputs/elasticsearch/README.md @@ -33,6 +33,11 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre ## Master node. cluster_stats = false + ## node_stats is a list of sub-stats that you want to have gathered. Valid options + ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", + ## "breakers". Per default, all stats are gathered. 
+ # node_stats = ["jvm", "http"] + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 589d0fe3e..f5ddef5fb 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -103,6 +103,11 @@ const sampleConfig = ` ## Master node. cluster_stats = false + ## node_stats is a list of sub-stats that you want to have gathered. Valid options + ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", + ## "breakers". Per default, all stats are gathered. + # node_stats = ["jvm", "http"] + ## Optional SSL Config # ssl_ca = "/etc/telegraf/ca.pem" # ssl_cert = "/etc/telegraf/cert.pem" @@ -120,6 +125,7 @@ type Elasticsearch struct { ClusterHealth bool ClusterHealthLevel string ClusterStats bool + NodeStats []string SSLCA string `toml:"ssl_ca"` // Path to CA file SSLCert string `toml:"ssl_cert"` // Path to host cert file SSLKey string `toml:"ssl_key"` // Path to cert key file @@ -165,12 +171,7 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { for _, serv := range e.Servers { go func(s string, acc telegraf.Accumulator) { defer wg.Done() - var url string - if e.Local { - url = s + statsPathLocal - } else { - url = s + statsPath - } + url := e.nodeStatsUrl(s) e.isMaster = false if e.ClusterStats { @@ -229,6 +230,22 @@ func (e *Elasticsearch) createHttpClient() (*http.Client, error) { return client, nil } +func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string { + var url string + + if e.Local { + url = baseUrl + statsPathLocal + } else { + url = baseUrl + statsPath + } + + if len(e.NodeStats) == 0 { + return url + } + + return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ",")) +} + func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error { nodeStats := &struct { ClusterName string 
`json:"cluster_name"` @@ -269,6 +286,11 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er now := time.Now() for p, s := range stats { + // if one of the individual node stats is not even in the + // original result + if s == nil { + continue + } f := jsonparser.JSONFlattener{} // parse Json, ignoring strings and bools err := f.FlattenJSON("", s) diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 41b578c79..1616bfeb2 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -13,6 +13,16 @@ import ( "github.com/stretchr/testify/require" ) +func defaultTags() map[string]string { + return map[string]string{ + "cluster_name": "es-testcluster", + "node_attribute_master": "true", + "node_id": "SDFsfSDFsdfFSDSDfSFDSDF", + "node_name": "test.host.com", + "node_host": "test", + } +} + type transportMock struct { statusCode int body string @@ -45,15 +55,9 @@ func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) { assert.Fail(t, msg) } } -func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { - tags := map[string]string{ - "cluster_name": "es-testcluster", - "node_attribute_master": "true", - "node_id": "SDFsfSDFsdfFSDSDfSFDSDF", - "node_name": "test.host.com", - "node_host": "test", - } +func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { + tags := defaultTags() acc.AssertContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags) @@ -79,6 +83,31 @@ func TestGather(t *testing.T) { checkNodeStatsResult(t, &acc) } +func TestGatherIndividualStats(t *testing.T) { + es := newElasticsearchWithClient() + es.Servers = []string{"http://example.com:9200"} + es.NodeStats = 
[]string{"jvm", "process"} + es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess) + + var acc testutil.Accumulator + if err := acc.GatherError(es.Gather); err != nil { + t.Fatal(err) + } + + checkIsMaster(es, false, t) + + tags := defaultTags() + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_os", nodestatsOsExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_process", nodestatsProcessExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_jvm", nodestatsJvmExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags) +} + func TestGatherNodeStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index d43a3f9c5..6e487ff3f 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -504,6 +504,100 @@ const nodeStatsResponse = ` } ` +const nodeStatsResponseJVMProcess = ` +{ + "cluster_name": "es-testcluster", + "nodes": { + "SDFsfSDFsdfFSDSDfSFDSDF": { + "timestamp": 1436365550135, + "name": "test.host.com", + "transport_address": "inet[/127.0.0.1:9300]", + "host": "test", + "ip": [ + "inet[/127.0.0.1:9300]", + "NONE" + ], + "attributes": { + "master": "true" + }, + "process": { + "timestamp": 1436460392945, + "open_file_descriptors": 160, + "cpu": { + 
"percent": 2, + "sys_in_millis": 1870, + "user_in_millis": 13610, + "total_in_millis": 15480 + }, + "mem": { + "total_virtual_in_bytes": 4747890688 + } + }, + "jvm": { + "timestamp": 1436460392945, + "uptime_in_millis": 202245, + "mem": { + "heap_used_in_bytes": 52709568, + "heap_used_percent": 5, + "heap_committed_in_bytes": 259522560, + "heap_max_in_bytes": 1038876672, + "non_heap_used_in_bytes": 39634576, + "non_heap_committed_in_bytes": 40841216, + "pools": { + "young": { + "used_in_bytes": 32685760, + "max_in_bytes": 279183360, + "peak_used_in_bytes": 71630848, + "peak_max_in_bytes": 279183360 + }, + "survivor": { + "used_in_bytes": 8912880, + "max_in_bytes": 34865152, + "peak_used_in_bytes": 8912888, + "peak_max_in_bytes": 34865152 + }, + "old": { + "used_in_bytes": 11110928, + "max_in_bytes": 724828160, + "peak_used_in_bytes": 14354608, + "peak_max_in_bytes": 724828160 + } + } + }, + "threads": { + "count": 44, + "peak_count": 45 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 2, + "collection_time_in_millis": 98 + }, + "old": { + "collection_count": 1, + "collection_time_in_millis": 24 + } + } + }, + "buffer_pools": { + "direct": { + "count": 40, + "used_in_bytes": 6304239, + "total_capacity_in_bytes": 6304239 + }, + "mapped": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + } + } + } + } +} +` + var nodestatsIndicesExpected = map[string]interface{}{ "id_cache_memory_size_in_bytes": float64(0), "completion_size_in_bytes": float64(0), From 4093bc98b7c711c40b8b6007d1793dfb8c51d105 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Oct 2017 16:17:09 -0700 Subject: [PATCH 23/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 78bb8d0c3..56b6daae1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ - [#3141](https://github.com/influxdata/telegraf/pull/3141): Collect Docker Swarm service metrics in docker input plugin. 
- [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data. - [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input. +- [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input. ### Bugfixes From 0f452ad0df4d72d9dfb32d8e36076b765185aef3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 6 Oct 2017 16:57:57 -0700 Subject: [PATCH 24/95] Document /etc/default/telegraf file --- docs/CONFIGURATION.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index d2cffbe07..6c48d2d84 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -24,6 +24,9 @@ Environment variables can be used anywhere in the config file, simply prepend them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +When using the `.deb` or `.rpm` packages, you can define environment variables +in the `/etc/default/telegraf` file. 
+ ## Configuration file locations The location of the configuration file can be set via the `--config` command From 761544f56d90764b0bf5ab836a8c3ec79e5445b0 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Oct 2017 15:02:57 -0700 Subject: [PATCH 25/95] Add HasPoint method to testutil.Accumulator --- plugins/inputs/filestat/filestat_test.go | 92 ++++++++++++------------ testutil/accumulator.go | 25 +++++++ 2 files changed, 69 insertions(+), 48 deletions(-) diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index 83d3c87cc..56d47ff7c 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -5,6 +5,8 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) @@ -24,28 +26,24 @@ func TestGatherNoMd5(t *testing.T) { tags1 := map[string]string{ "file": dir + "log1.log", } - fields1 := map[string]interface{}{ - "size_bytes": int64(0), - "exists": int64(1), - } - acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1) + require.True(t, acc.HasPoint("filestat", tags1, + "size_bytes", int64(0))) + require.True(t, acc.HasPoint("filestat", tags1, + "exists", int64(1))) tags2 := map[string]string{ "file": dir + "log2.log", } - fields2 := map[string]interface{}{ - "size_bytes": int64(0), - "exists": int64(1), - } - acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2) + require.True(t, acc.HasPoint("filestat", tags2, + "size_bytes", int64(0))) + require.True(t, acc.HasPoint("filestat", tags2, + "exists", int64(1))) tags3 := map[string]string{ "file": "/non/existant/file", } - fields3 := map[string]interface{}{ - "exists": int64(0), - } - acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3) + require.True(t, acc.HasPoint("filestat", tags3, + "exists", int64(0))) } func TestGatherExplicitFiles(t *testing.T) { @@ -64,30 +62,28 @@ func TestGatherExplicitFiles(t 
*testing.T) { tags1 := map[string]string{ "file": dir + "log1.log", } - fields1 := map[string]interface{}{ - "size_bytes": int64(0), - "exists": int64(1), - "md5_sum": "d41d8cd98f00b204e9800998ecf8427e", - } - acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1) + require.True(t, acc.HasPoint("filestat", tags1, + "size_bytes", int64(0))) + require.True(t, acc.HasPoint("filestat", tags1, + "exists", int64(1))) + require.True(t, acc.HasPoint("filestat", tags1, + "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ "file": dir + "log2.log", } - fields2 := map[string]interface{}{ - "size_bytes": int64(0), - "exists": int64(1), - "md5_sum": "d41d8cd98f00b204e9800998ecf8427e", - } - acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2) + require.True(t, acc.HasPoint("filestat", tags2, + "size_bytes", int64(0))) + require.True(t, acc.HasPoint("filestat", tags2, + "exists", int64(1))) + require.True(t, acc.HasPoint("filestat", tags2, + "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ "file": "/non/existant/file", } - fields3 := map[string]interface{}{ - "exists": int64(0), - } - acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3) + require.True(t, acc.HasPoint("filestat", tags3, + "exists", int64(0))) } func TestGatherGlob(t *testing.T) { @@ -136,32 +132,32 @@ func TestGatherSuperAsterisk(t *testing.T) { tags1 := map[string]string{ "file": dir + "log1.log", } - fields1 := map[string]interface{}{ - "size_bytes": int64(0), - "exists": int64(1), - "md5_sum": "d41d8cd98f00b204e9800998ecf8427e", - } - acc.AssertContainsTaggedFields(t, "filestat", fields1, tags1) + require.True(t, acc.HasPoint("filestat", tags1, + "size_bytes", int64(0))) + require.True(t, acc.HasPoint("filestat", tags1, + "exists", int64(1))) + require.True(t, acc.HasPoint("filestat", tags1, + "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags2 := map[string]string{ "file": dir + "log2.log", } - fields2 := map[string]interface{}{ - 
"size_bytes": int64(0), - "exists": int64(1), - "md5_sum": "d41d8cd98f00b204e9800998ecf8427e", - } - acc.AssertContainsTaggedFields(t, "filestat", fields2, tags2) + require.True(t, acc.HasPoint("filestat", tags2, + "size_bytes", int64(0))) + require.True(t, acc.HasPoint("filestat", tags2, + "exists", int64(1))) + require.True(t, acc.HasPoint("filestat", tags2, + "md5_sum", "d41d8cd98f00b204e9800998ecf8427e")) tags3 := map[string]string{ "file": dir + "test.conf", } - fields3 := map[string]interface{}{ - "size_bytes": int64(104), - "exists": int64(1), - "md5_sum": "5a7e9b77fa25e7bb411dbd17cf403c1f", - } - acc.AssertContainsTaggedFields(t, "filestat", fields3, tags3) + require.True(t, acc.HasPoint("filestat", tags3, + "size_bytes", int64(104))) + require.True(t, acc.HasPoint("filestat", tags3, + "exists", int64(1))) + require.True(t, acc.HasPoint("filestat", tags3, + "md5_sum", "5a7e9b77fa25e7bb411dbd17cf403c1f")) } func TestGetMd5(t *testing.T) { diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 9a8eef9e3..c478400eb 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -297,6 +297,31 @@ func (a *Accumulator) AssertContainsFields( assert.Fail(t, msg) } +func (a *Accumulator) HasPoint( + measurement string, + tags map[string]string, + fieldKey string, + fieldValue interface{}, +) bool { + a.Lock() + defer a.Unlock() + for _, p := range a.Metrics { + if p.Measurement != measurement { + continue + } + + if !reflect.DeepEqual(tags, p.Tags) { + continue + } + + v, ok := p.Fields[fieldKey] + if ok && reflect.DeepEqual(v, fieldValue) { + return true + } + } + return false +} + func (a *Accumulator) AssertDoesNotContainMeasurement(t *testing.T, measurement string) { a.Lock() defer a.Unlock() From 61b0336d97543dc7cc8d32a77e7892ded36fdc24 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 9 Oct 2017 15:09:07 -0700 Subject: [PATCH 26/95] Use 5 second timeout overhead when waiting for ping to complete --- plugins/inputs/ping/ping.go | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index dffa88751..dcbb2c286 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -145,7 +145,7 @@ func hostPinger(timeout float64, args ...string) (string, error) { } c := exec.Command(bin, args...) out, err := internal.CombinedOutputTimeout(c, - time.Second*time.Duration(timeout+1)) + time.Second*time.Duration(timeout+5)) return string(out), err } From c7a6d4eaa40d4cd44412e9d632f6d315f4d752f2 Mon Sep 17 00:00:00 2001 From: Pierre Tessier Date: Tue, 10 Oct 2017 11:21:46 -0700 Subject: [PATCH 27/95] Fix link for wavefront plugin in changelog (#3317) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56b6daae1..5784a87b3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ - [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei - [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah - [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen -- [wavefront](./plugins/inputs/wavefront/README.md) - Thanks to @puckpuck +- [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck ### Release Notes From b641f06552cdc7a1ce6cac8c8080af04f56da70a Mon Sep 17 00:00:00 2001 From: Toni Moreno Date: Tue, 10 Oct 2017 21:02:01 +0200 Subject: [PATCH 28/95] Add new basicstats aggregator (#2167) --- plugins/aggregators/all/all.go | 1 + plugins/aggregators/basicstats/README.md | 43 +++++ plugins/aggregators/basicstats/basicstats.go | 155 ++++++++++++++++++ .../aggregators/basicstats/basicstats_test.go | 151 +++++++++++++++++ 4 files changed, 350 insertions(+) create mode 100644 plugins/aggregators/basicstats/README.md create mode 100644 plugins/aggregators/basicstats/basicstats.go create mode 100644 plugins/aggregators/basicstats/basicstats_test.go diff --git a/plugins/aggregators/all/all.go 
b/plugins/aggregators/all/all.go index c4d430cc9..98aecb83f 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -1,6 +1,7 @@ package all import ( + _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats" _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" ) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md new file mode 100644 index 000000000..0e3e5558c --- /dev/null +++ b/plugins/aggregators/basicstats/README.md @@ -0,0 +1,43 @@ +# BasicStats Aggregator Plugin + +The BasicStats aggregator plugin give us count,max,min,mean,s2(variance), stdev for a set of values, +emitting the aggregate every `period` seconds. + +### Configuration: + +```toml +# Keep the aggregate basicstats of each metric passing through. +[[aggregators.basicstats]] + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false +``` + +### Measurements & Fields: + +- measurement1 + - field1_count + - field1_max + - field1_min + - field1_mean + - field1_s2 (variance) + - field1_stdev (standard deviation) + +### Tags: + +No tags are applied by this aggregator. 
+ +### Example Output: + +``` +$ telegraf --config telegraf.conf --quiet +system,host=tars load1=1 1475583980000000000 +system,host=tars load1=1 1475583990000000000 +system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_s2=0,load1_stdev=0 1475584010000000000 +system,host=tars load1=1 1475584020000000000 +system,host=tars load1=3 1475584030000000000 +system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_s2=2,load1_stdev=1.414162 1475584010000000000 +``` diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go new file mode 100644 index 000000000..40d65c873 --- /dev/null +++ b/plugins/aggregators/basicstats/basicstats.go @@ -0,0 +1,155 @@ +package basicstats + +import ( + "math" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +type BasicStats struct { + cache map[uint64]aggregate +} + +func NewBasicStats() telegraf.Aggregator { + mm := &BasicStats{} + mm.Reset() + return mm +} + +type aggregate struct { + fields map[string]basicstats + name string + tags map[string]string +} + +type basicstats struct { + count float64 + min float64 + max float64 + mean float64 + M2 float64 //intermedia value for variance/stdev +} + +var sampleConfig = ` + ## General Aggregator Arguments: + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false +` + +func (m *BasicStats) SampleConfig() string { + return sampleConfig +} + +func (m *BasicStats) Description() string { + return "Keep the aggregate basicstats of each metric passing through." 
+} + +func (m *BasicStats) Add(in telegraf.Metric) { + id := in.HashID() + if _, ok := m.cache[id]; !ok { + // hit an uncached metric, create caches for first time: + a := aggregate{ + name: in.Name(), + tags: in.Tags(), + fields: make(map[string]basicstats), + } + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + a.fields[k] = basicstats{ + count: 1, + min: fv, + max: fv, + mean: fv, + M2: 0.0, + } + } + } + m.cache[id] = a + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.cache[id].fields[k]; !ok { + // hit an uncached field of a cached metric + m.cache[id].fields[k] = basicstats{ + count: 1, + min: fv, + max: fv, + mean: fv, + M2: 0.0, + } + continue + } + + tmp := m.cache[id].fields[k] + //https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance + //variable initialization + x := fv + mean := tmp.mean + M2 := tmp.M2 + //counter compute + n := tmp.count + 1 + tmp.count = n + //mean compute + delta := x - mean + mean = mean + delta/n + tmp.mean = mean + //variance/stdev compute + M2 = M2 + delta*(x-mean) + tmp.M2 = M2 + //max/min compute + if fv < tmp.min { + tmp.min = fv + } else if fv > tmp.max { + tmp.max = fv + } + //store final data + m.cache[id].fields[k] = tmp + } + } + } +} + +func (m *BasicStats) Push(acc telegraf.Accumulator) { + for _, aggregate := range m.cache { + fields := map[string]interface{}{} + for k, v := range aggregate.fields { + fields[k+"_count"] = v.count + fields[k+"_min"] = v.min + fields[k+"_max"] = v.max + fields[k+"_mean"] = v.mean + //v.count always >=1 + if v.count > 1 { + variance := v.M2 / (v.count - 1) + fields[k+"_s2"] = variance + fields[k+"_stdev"] = math.Sqrt(variance) + } + //if count == 1 StdDev = infinite => so I won't send data + } + acc.AddFields(aggregate.name, fields, aggregate.tags) + } +} + +func (m *BasicStats) Reset() { + m.cache = make(map[uint64]aggregate) +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case 
float64: + return v, true + case int64: + return float64(v), true + default: + return 0, false + } +} + +func init() { + aggregators.Add("basicstats", func() telegraf.Aggregator { + return NewBasicStats() + }) +} diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go new file mode 100644 index 000000000..74237c0c7 --- /dev/null +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -0,0 +1,151 @@ +package basicstats + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +var m1, _ = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(1), + "c": float64(2), + "d": float64(2), + }, + time.Now(), +) +var m2, _ = metric.New("m1", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "a": int64(1), + "b": int64(3), + "c": float64(4), + "d": float64(6), + "e": float64(200), + "ignoreme": "string", + "andme": true, + }, + time.Now(), +) + +func BenchmarkApply(b *testing.B) { + minmax := NewBasicStats() + + for n := 0; n < b.N; n++ { + minmax.Add(m1) + minmax.Add(m2) + } +} + +// Test two metrics getting added. 
+func TestBasicStatsWithPeriod(t *testing.T) { + acc := testutil.Accumulator{} + minmax := NewBasicStats() + + minmax.Add(m1) + minmax.Add(m2) + minmax.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_count": float64(2), //a + "a_max": float64(1), + "a_min": float64(1), + "a_mean": float64(1), + "a_stdev": float64(0), + "a_s2": float64(0), + "b_count": float64(2), //b + "b_max": float64(3), + "b_min": float64(1), + "b_mean": float64(2), + "b_s2": float64(2), + "b_stdev": math.Sqrt(2), + "c_count": float64(2), //c + "c_max": float64(4), + "c_min": float64(2), + "c_mean": float64(3), + "c_s2": float64(2), + "c_stdev": math.Sqrt(2), + "d_count": float64(2), //d + "d_max": float64(6), + "d_min": float64(2), + "d_mean": float64(4), + "d_s2": float64(8), + "d_stdev": math.Sqrt(8), + "e_count": float64(1), //e + "e_max": float64(200), + "e_min": float64(200), + "e_mean": float64(200), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test two metrics getting added with a push/reset in between (simulates +// getting added in different periods.) 
+func TestBasicStatsDifferentPeriods(t *testing.T) { + acc := testutil.Accumulator{} + minmax := NewBasicStats() + + minmax.Add(m1) + minmax.Push(&acc) + expectedFields := map[string]interface{}{ + "a_count": float64(1), //a + "a_max": float64(1), + "a_min": float64(1), + "a_mean": float64(1), + "b_count": float64(1), //b + "b_max": float64(1), + "b_min": float64(1), + "b_mean": float64(1), + "c_count": float64(1), //c + "c_max": float64(2), + "c_min": float64(2), + "c_mean": float64(2), + "d_count": float64(1), //d + "d_max": float64(2), + "d_min": float64(2), + "d_mean": float64(2), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) + + acc.ClearMetrics() + minmax.Reset() + minmax.Add(m2) + minmax.Push(&acc) + expectedFields = map[string]interface{}{ + "a_count": float64(1), //a + "a_max": float64(1), + "a_min": float64(1), + "a_mean": float64(1), + "b_count": float64(1), //b + "b_max": float64(3), + "b_min": float64(3), + "b_mean": float64(3), + "c_count": float64(1), //c + "c_max": float64(4), + "c_min": float64(4), + "c_mean": float64(4), + "d_count": float64(1), //d + "d_max": float64(6), + "d_min": float64(6), + "d_mean": float64(6), + "e_count": float64(1), //e + "e_max": float64(200), + "e_min": float64(200), + "e_mean": float64(200), + } + expectedTags = map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} From d7ea83f39bb85e68066ef923a084cc61a166e68e Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Oct 2017 12:04:41 -0700 Subject: [PATCH 29/95] Update readme and changelog for basicstats aggregator --- CHANGELOG.md | 2 ++ README.md | 1 + 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5784a87b3..27fb7ac50 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## v1.5 [unreleased] ### New Plugins +- [basicstats](./plugins/aggregators/basicstats/README.md) - Thanks to 
@toni-moreno - [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei - [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah - [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen @@ -41,6 +42,7 @@ - [#2449](https://github.com/influxdata/telegraf/pull/2449): Add smart input plugin for collecting S.M.A.R.T. data. - [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input. - [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input. +- [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator. ### Bugfixes diff --git a/README.md b/README.md index a3887e566..2dd836be2 100644 --- a/README.md +++ b/README.md @@ -255,6 +255,7 @@ formats may be used with input plugins supporting the `data_format` option: ## Aggregator Plugins +* [basicstats](./plugins/aggregators/basicstats) * [minmax](./plugins/aggregators/minmax) * [histogram](./plugins/aggregators/histogram) From 4e0c8e6026d3263a5d74ccac7c4c404fb1528430 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 10 Oct 2017 13:29:31 -0700 Subject: [PATCH 30/95] Set 1.4.2 release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27fb7ac50..29c4aaef4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,7 +49,7 @@ - [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload. - [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock. -## v1.4.2 [unreleased] +## v1.4.2 [2017-10-10] ### Bugfixes From c74c29b164ccb0facc7c24bcbcc78fc31e033eba Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 11 Oct 2017 12:56:33 -0700 Subject: [PATCH 31/95] Remove suggested plugins from readme. 
These are confusing since we don't support all of the examples. --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 2dd836be2..62d725083 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,7 @@ and writing metrics. Design goals are to have a minimal memory footprint with a plugin system so that developers in the community can easily add support for collecting metrics -from well known services (like Hadoop, Postgres, or Redis) and third party -APIs (like Mailchimp, AWS CloudWatch, or Google Analytics). +from local or remote services. Telegraf is plugin-driven and has the concept of 4 distinct plugins: From bed14e503782ec6f8563d0fd46059d88a6d5184f Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 12 Oct 2017 18:08:51 -0400 Subject: [PATCH 32/95] Fix documented equation for diskio average queue depth (#3334) --- plugins/inputs/system/DISK_README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/system/DISK_README.md b/plugins/inputs/system/DISK_README.md index b3b6a52fa..31eff937d 100644 --- a/plugins/inputs/system/DISK_README.md +++ b/plugins/inputs/system/DISK_README.md @@ -140,7 +140,7 @@ SELECT derivative(last("io_time"),1ms) FROM "diskio" WHERE time > now() - 30m GR #### Calculate average queue depth: `iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals. 
``` -SELECT derivative(last("weighted_io_time",1ms))/1000 from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) +SELECT derivative(last("weighted_io_time",1ms)) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` ### Example Output: From fa25e123d802e482a2c11a4ad5995ea58ce70dc3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Oct 2017 15:50:09 -0700 Subject: [PATCH 33/95] Fix container name filters in docker input (#3331) --- filter/filter.go | 37 +++ plugins/inputs/docker/docker.go | 75 ++--- plugins/inputs/docker/docker_test.go | 401 ++++++++++++++++++++------- 3 files changed, 353 insertions(+), 160 deletions(-) diff --git a/filter/filter.go b/filter/filter.go index 9a28c2627..df171257b 100644 --- a/filter/filter.go +++ b/filter/filter.go @@ -77,3 +77,40 @@ func compileFilterNoGlob(filters []string) Filter { } return &out } + +type IncludeExcludeFilter struct { + include Filter + exclude Filter +} + +func NewIncludeExcludeFilter( + include []string, + exclude []string, +) (Filter, error) { + in, err := Compile(include) + if err != nil { + return nil, err + } + + ex, err := Compile(exclude) + if err != nil { + return nil, err + } + + return &IncludeExcludeFilter{in, ex}, nil +} + +func (f *IncludeExcludeFilter) Match(s string) bool { + if f.include != nil { + if !f.include.Match(s) { + return false + } + } + + if f.exclude != nil { + if f.exclude.Match(s) { + return false + } + } + return true +} diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index 171097621..5f39fa0d9 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -22,16 +22,6 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type DockerLabelFilter struct { - labelInclude filter.Filter - labelExclude filter.Filter -} - -type DockerContainerFilter struct { - containerInclude filter.Filter - containerExclude filter.Filter -} - // Docker object type Docker struct { Endpoint string 
@@ -45,11 +35,9 @@ type Docker struct { TagEnvironment []string `toml:"tag_env"` LabelInclude []string `toml:"docker_label_include"` LabelExclude []string `toml:"docker_label_exclude"` - LabelFilter DockerLabelFilter ContainerInclude []string `toml:"container_name_include"` ContainerExclude []string `toml:"container_name_exclude"` - ContainerFilter DockerContainerFilter SSLCA string `toml:"ssl_ca"` SSLCert string `toml:"ssl_cert"` @@ -59,10 +47,12 @@ type Docker struct { newEnvClient func() (Client, error) newClient func(string, *tls.Config) (Client, error) - client Client - httpClient *http.Client - engine_host string - filtersCreated bool + client Client + httpClient *http.Client + engine_host string + filtersCreated bool + labelFilter filter.Filter + containerFilter filter.Filter } // KB, MB, GB, TB, PB...human friendly @@ -374,12 +364,8 @@ func (d *Docker) gatherContainer( "container_version": imageVersion, } - if len(d.ContainerInclude) > 0 || len(d.ContainerExclude) > 0 { - if len(d.ContainerInclude) == 0 || !d.ContainerFilter.containerInclude.Match(cname) { - if len(d.ContainerExclude) == 0 || d.ContainerFilter.containerExclude.Match(cname) { - return nil - } - } + if !d.containerFilter.Match(cname) { + return nil } ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) @@ -400,10 +386,8 @@ func (d *Docker) gatherContainer( // Add labels to tags for k, label := range container.Labels { - if len(d.LabelInclude) == 0 || d.LabelFilter.labelInclude.Match(k) { - if len(d.LabelExclude) == 0 || !d.LabelFilter.labelExclude.Match(k) { - tags[k] = label - } + if d.labelFilter.Match(k) { + tags[k] = label } } @@ -749,46 +733,25 @@ func parseSize(sizeStr string) (int64, error) { } func (d *Docker) createContainerFilters() error { + // Backwards compatibility for deprecated `container_names` parameter. if len(d.ContainerNames) > 0 { d.ContainerInclude = append(d.ContainerInclude, d.ContainerNames...) 
} - if len(d.ContainerInclude) != 0 { - var err error - d.ContainerFilter.containerInclude, err = filter.Compile(d.ContainerInclude) - if err != nil { - return err - } + filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + if err != nil { + return err } - - if len(d.ContainerExclude) != 0 { - var err error - d.ContainerFilter.containerExclude, err = filter.Compile(d.ContainerExclude) - if err != nil { - return err - } - } - + d.containerFilter = filter return nil } func (d *Docker) createLabelFilters() error { - if len(d.LabelInclude) != 0 { - var err error - d.LabelFilter.labelInclude, err = filter.Compile(d.LabelInclude) - if err != nil { - return err - } + filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + if err != nil { + return err } - - if len(d.LabelExclude) != 0 { - var err error - d.LabelFilter.labelExclude, err = filter.Compile(d.LabelExclude) - if err != nil { - return err - } - } - + d.labelFilter = filter return nil } diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index b18274136..088c94597 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -69,30 +69,32 @@ func (c *MockClient) NodeList( return c.NodeListF(ctx, options) } +var baseClient = MockClient{ + InfoF: func(context.Context) (types.Info, error) { + return info, nil + }, + ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + return containerList, nil + }, + ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) { + return containerStats(), nil + }, + ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) { + return containerInspect, nil + }, + ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { + return ServiceList, nil + }, + TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) { + return 
TaskList, nil + }, + NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { + return NodeList, nil + }, +} + func newClient(host string, tlsConfig *tls.Config) (Client, error) { - return &MockClient{ - InfoF: func(context.Context) (types.Info, error) { - return info, nil - }, - ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) { - return containerList, nil - }, - ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) { - return containerStats(), nil - }, - ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) { - return containerInspect, nil - }, - ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { - return ServiceList, nil - }, - TaskListF: func(context.Context, types.TaskListOptions) ([]swarm.Task, error) { - return TaskList, nil - }, - NodeListF: func(context.Context, types.NodeListOptions) ([]swarm.Node, error) { - return NodeList, nil - }, - }, nil + return &baseClient, nil } func TestDockerGatherContainerStats(t *testing.T) { @@ -277,82 +279,291 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { require.NoError(t, err) } -func TestDockerGatherLabels(t *testing.T) { - var gatherLabelsTests = []struct { - include []string - exclude []string - expected []string - notexpected []string +func TestContainerLabels(t *testing.T) { + var tests = []struct { + name string + container types.Container + include []string + exclude []string + expected map[string]string }{ - {[]string{}, []string{}, []string{"label1", "label2"}, []string{}}, - {[]string{"*"}, []string{}, []string{"label1", "label2"}, []string{}}, - {[]string{"lab*"}, []string{}, []string{"label1", "label2"}, []string{}}, - {[]string{"label1"}, []string{}, []string{"label1"}, []string{"label2"}}, - {[]string{"label1*"}, []string{}, []string{"label1"}, []string{"label2"}}, - {[]string{}, []string{"*"}, []string{}, []string{"label1", "label2"}}, 
- {[]string{}, []string{"lab*"}, []string{}, []string{"label1", "label2"}}, - {[]string{}, []string{"label1"}, []string{"label2"}, []string{"label1"}}, - {[]string{"*"}, []string{"*"}, []string{}, []string{"label1", "label2"}}, + { + name: "Nil filters matches all", + container: types.Container{ + Labels: map[string]string{ + "a": "x", + }, + }, + include: nil, + exclude: nil, + expected: map[string]string{ + "a": "x", + }, + }, + { + name: "Empty filters matches all", + container: types.Container{ + Labels: map[string]string{ + "a": "x", + }, + }, + include: []string{}, + exclude: []string{}, + expected: map[string]string{ + "a": "x", + }, + }, + { + name: "Must match include", + container: types.Container{ + Labels: map[string]string{ + "a": "x", + "b": "y", + }, + }, + include: []string{"a"}, + exclude: []string{}, + expected: map[string]string{ + "a": "x", + }, + }, + { + name: "Must not match exclude", + container: types.Container{ + Labels: map[string]string{ + "a": "x", + "b": "y", + }, + }, + include: []string{}, + exclude: []string{"b"}, + expected: map[string]string{ + "a": "x", + }, + }, + { + name: "Include Glob", + container: types.Container{ + Labels: map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }, + }, + include: []string{"a*"}, + exclude: []string{}, + expected: map[string]string{ + "aa": "x", + "ab": "y", + }, + }, + { + name: "Exclude Glob", + container: types.Container{ + Labels: map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }, + }, + include: []string{}, + exclude: []string{"a*"}, + expected: map[string]string{ + "bb": "z", + }, + }, + { + name: "Excluded Includes", + container: types.Container{ + Labels: map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }, + }, + include: []string{"a*"}, + exclude: []string{"*b"}, + expected: map[string]string{ + "aa": "x", + }, + }, } - - for _, tt := range gatherLabelsTests { - t.Run("", func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { var acc testutil.Accumulator - d := Docker{ - newClient: newClient, + + newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{tt.container}, nil + } + return &client, nil } - for _, label := range tt.include { - d.LabelInclude = append(d.LabelInclude, label) - } - for _, label := range tt.exclude { - d.LabelExclude = append(d.LabelExclude, label) + d := Docker{ + newClient: newClientFunc, + LabelInclude: tt.include, + LabelExclude: tt.exclude, } err := d.Gather(&acc) require.NoError(t, err) - for _, label := range tt.expected { - if !acc.HasTag("docker_container_cpu", label) { - t.Errorf("Didn't get expected label of %s. Test was: Include: %s Exclude %s", - label, tt.include, tt.exclude) + // Grab tags from a container metric + var actual map[string]string + for _, metric := range acc.Metrics { + if metric.Measurement == "docker_container_cpu" { + actual = metric.Tags } } - for _, label := range tt.notexpected { - if acc.HasTag("docker_container_cpu", label) { - t.Errorf("Got unexpected label of %s. 
Test was: Include: %s Exclude %s", - label, tt.include, tt.exclude) - } + for k, v := range tt.expected { + require.Equal(t, v, actual[k]) } }) } } func TestContainerNames(t *testing.T) { - var gatherContainerNames = []struct { - include []string - exclude []string - expected []string - notexpected []string + var tests = []struct { + name string + containers [][]string + include []string + exclude []string + expected []string }{ - {[]string{}, []string{}, []string{"etcd", "etcd2"}, []string{}}, - {[]string{"*"}, []string{}, []string{"etcd", "etcd2"}, []string{}}, - {[]string{"etc*"}, []string{}, []string{"etcd", "etcd2"}, []string{}}, - {[]string{"etcd"}, []string{}, []string{"etcd"}, []string{"etcd2"}}, - {[]string{"etcd2*"}, []string{}, []string{"etcd2"}, []string{"etcd"}}, - {[]string{}, []string{"etc*"}, []string{}, []string{"etcd", "etcd2"}}, - {[]string{}, []string{"etcd"}, []string{"etcd2"}, []string{"etcd"}}, - {[]string{"*"}, []string{"*"}, []string{"etcd", "etcd2"}, []string{}}, - {[]string{}, []string{"*"}, []string{""}, []string{"etcd", "etcd2"}}, + { + name: "Nil filters matches all", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: nil, + exclude: nil, + expected: []string{"etcd", "etcd2"}, + }, + { + name: "Empty filters matches all", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{}, + exclude: []string{}, + expected: []string{"etcd", "etcd2"}, + }, + { + name: "Match all containers", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{"*"}, + exclude: []string{}, + expected: []string{"etcd", "etcd2"}, + }, + { + name: "Include prefix match", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{"etc*"}, + exclude: []string{}, + expected: []string{"etcd", "etcd2"}, + }, + { + name: "Exact match", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{"etcd"}, + exclude: []string{}, + expected: []string{"etcd"}, + }, + { + name: 
"Star matches zero length", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{"etcd2*"}, + exclude: []string{}, + expected: []string{"etcd2"}, + }, + { + name: "Exclude matches all", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{}, + exclude: []string{"etc*"}, + expected: []string{}, + }, + { + name: "Exclude single", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{}, + exclude: []string{"etcd"}, + expected: []string{"etcd2"}, + }, + { + name: "Exclude all", + containers: [][]string{ + {"/etcd"}, + {"/etcd2"}, + }, + include: []string{"*"}, + exclude: []string{"*"}, + expected: []string{}, + }, + { + name: "Exclude item matching include", + containers: [][]string{ + {"acme"}, + {"foo"}, + {"acme-test"}, + }, + include: []string{"acme*"}, + exclude: []string{"*test*"}, + expected: []string{"acme"}, + }, + { + name: "Exclude item no wildcards", + containers: [][]string{ + {"acme"}, + {"acme-test"}, + }, + include: []string{"acme*"}, + exclude: []string{"test"}, + expected: []string{"acme", "acme-test"}, + }, } - - for _, tt := range gatherContainerNames { - t.Run("", func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { var acc testutil.Accumulator + newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + var containers []types.Container + for _, names := range tt.containers { + containers = append(containers, types.Container{ + Names: names, + }) + } + return containers, nil + } + return &client, nil + } + d := Docker{ - newClient: newClient, + newClient: newClientFunc, ContainerInclude: tt.include, ContainerExclude: tt.exclude, } @@ -360,39 +571,21 @@ func TestContainerNames(t *testing.T) { err := d.Gather(&acc) require.NoError(t, err) + // Set of expected names + var expected = 
make(map[string]bool) + for _, v := range tt.expected { + expected[v] = true + } + + // Set of actual names + var actual = make(map[string]bool) for _, metric := range acc.Metrics { - if metric.Measurement == "docker_container_cpu" { - if val, ok := metric.Tags["container_name"]; ok { - var found bool = false - for _, cname := range tt.expected { - if val == cname { - found = true - break - } - } - if !found { - t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude) - } - } + if name, ok := metric.Tags["container_name"]; ok { + actual[name] = true } } - for _, metric := range acc.Metrics { - if metric.Measurement == "docker_container_cpu" { - if val, ok := metric.Tags["container_name"]; ok { - var found bool = false - for _, cname := range tt.notexpected { - if val == cname { - found = true - break - } - } - if found { - t.Errorf("Got unexpected container of %s. Test was -> Include: %s, Exclude: %s", val, tt.include, tt.exclude) - } - } - } - } + require.Equal(t, expected, actual) }) } } From 024dea2ff98c191bdff0e8bcf7514074f5e0e8de Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Oct 2017 15:52:01 -0700 Subject: [PATCH 34/95] Update changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29c4aaef4..9946b2a21 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,12 @@ - [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload. - [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock. +## v1.4.3 [unreleased] + +### Bugfixes + +- [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input. 
+ ## v1.4.2 [2017-10-10] ### Bugfixes From 2f8d0f4d4784f1ee217bce2c40c94b1dd78bd908 Mon Sep 17 00:00:00 2001 From: Windkit Li Date: Fri, 13 Oct 2017 09:26:14 +0900 Subject: [PATCH 35/95] Fix snmpwalk address format in leofs input (#3328) --- etc/telegraf.conf | 4 +- plugins/inputs/leofs/leofs.go | 40 ++++----- plugins/inputs/leofs/leofs_test.go | 136 ++++++++++++++--------------- 3 files changed, 86 insertions(+), 94 deletions(-) diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 2c23864cf..f3128b8fd 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -1586,8 +1586,8 @@ # # Read metrics from a LeoFS Server via SNMP # [[inputs.leofs]] # ## An array of URLs of the form: -# ## "udp://" host [ ":" port] -# servers = ["udp://127.0.0.1:4020"] +# ## host [ ":" port] +# servers = ["127.0.0.1:4020"] # # Provides Linux sysctl fs metrics diff --git a/plugins/inputs/leofs/leofs.go b/plugins/inputs/leofs/leofs.go index 55a727ef8..9381eace8 100644 --- a/plugins/inputs/leofs/leofs.go +++ b/plugins/inputs/leofs/leofs.go @@ -3,8 +3,6 @@ package leofs import ( "bufio" "fmt" - "log" - "net/url" "os/exec" "strconv" "strings" @@ -19,7 +17,7 @@ import ( const oid = ".1.3.6.1.4.1.35450" // For Manager Master -const defaultEndpoint = "udp://127.0.0.1:4020" +const defaultEndpoint = "127.0.0.1:4020" type ServerType int @@ -137,8 +135,8 @@ var serverTypeMapping = map[string]ServerType{ var sampleConfig = ` ## An array of URLs of the form: - ## "udp://" host [ ":" port] - servers = ["udp://127.0.0.1:4020"] + ## host [ ":" port] + servers = ["127.0.0.1:4020"] ` func (l *LeoFS) SampleConfig() string { @@ -155,28 +153,22 @@ func (l *LeoFS) Gather(acc telegraf.Accumulator) error { return nil } var wg sync.WaitGroup - for i, endpoint := range l.Servers { - if !strings.HasPrefix(endpoint, "udp://") { - // Preserve backwards compatibility for hostnames without a - // scheme, broken in go 1.8. Remove in Telegraf 2.0 - endpoint = "udp://" + endpoint - log.Printf("W! 
[inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", endpoint) - l.Servers[i] = endpoint - } - u, err := url.Parse(endpoint) - if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address %q: %s", endpoint, err)) - continue - } - if u.Host == "" { + for _, endpoint := range l.Servers { + results := strings.Split(endpoint, ":") + + port := "4020" + if len(results) > 2 { acc.AddError(fmt.Errorf("Unable to parse address %q", endpoint)) continue + } else if len(results) == 2 { + if _, err := strconv.Atoi(results[1]); err == nil { + port = results[1] + } else { + acc.AddError(fmt.Errorf("Unable to parse port from %q", endpoint)) + continue + } } - port := u.Port() - if port == "" { - port = "4020" - } st, ok := serverTypeMapping[port] if !ok { st = ServerTypeStorage @@ -196,7 +188,7 @@ func (l *LeoFS) gatherServer( serverType ServerType, acc telegraf.Accumulator, ) error { - cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", endpoint, oid) + cmd := exec.Command("snmpwalk", "-v2c", "-cpublic", "-On", endpoint, oid) stdout, err := cmd.StdoutPipe() if err != nil { return err diff --git a/plugins/inputs/leofs/leofs_test.go b/plugins/inputs/leofs/leofs_test.go index a5ca30432..f3699b8d9 100644 --- a/plugins/inputs/leofs/leofs_test.go +++ b/plugins/inputs/leofs/leofs_test.go @@ -16,21 +16,21 @@ package main import "fmt" -const output = ` + "`" + `iso.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1" -iso.3.6.1.4.1.35450.15.2.0 = Gauge32: 186 -iso.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519 -iso.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525 -iso.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068 -iso.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968 -iso.3.6.1.4.1.35450.15.7.0 = Gauge32: 186 -iso.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006 -iso.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867 -iso.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995 -iso.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634 -iso.3.6.1.4.1.35450.15.12.0 = Gauge32: 60 
-iso.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904 -iso.3.6.1.4.1.35450.15.14.0 = Gauge32: 60 -iso.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" + +const output = ` + "`" + `.1.3.6.1.4.1.35450.15.1.0 = STRING: "manager_888@127.0.0.1" +.1.3.6.1.4.1.35450.15.2.0 = Gauge32: 186 +.1.3.6.1.4.1.35450.15.3.0 = Gauge32: 46235519 +.1.3.6.1.4.1.35450.15.4.0 = Gauge32: 32168525 +.1.3.6.1.4.1.35450.15.5.0 = Gauge32: 14066068 +.1.3.6.1.4.1.35450.15.6.0 = Gauge32: 5512968 +.1.3.6.1.4.1.35450.15.7.0 = Gauge32: 186 +.1.3.6.1.4.1.35450.15.8.0 = Gauge32: 46269006 +.1.3.6.1.4.1.35450.15.9.0 = Gauge32: 32202867 +.1.3.6.1.4.1.35450.15.10.0 = Gauge32: 14064995 +.1.3.6.1.4.1.35450.15.11.0 = Gauge32: 5492634 +.1.3.6.1.4.1.35450.15.12.0 = Gauge32: 60 +.1.3.6.1.4.1.35450.15.13.0 = Gauge32: 43515904 +.1.3.6.1.4.1.35450.15.14.0 = Gauge32: 60 +.1.3.6.1.4.1.35450.15.15.0 = Gauge32: 43533983` + "`" + ` func main() { fmt.Println(output) @@ -42,34 +42,34 @@ package main import "fmt" -const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1" -iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 512 -iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307 -iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716 -iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448 -iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008 -iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 512 -iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176 -iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398 -iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779 -iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315 -iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191 -iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 824 -iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0 -iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105 -iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654 -iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0 -iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052 -iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296 -iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 35 -iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 898 -iso.3.6.1.4.1.35450.34.22.0 = 
Gauge32: 0 -iso.3.6.1.4.1.35450.34.23.0 = Gauge32: 0 -iso.3.6.1.4.1.35450.34.24.0 = Gauge32: 0 -iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 51 -iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328 -iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 51 -iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" + +const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "storage_0@127.0.0.1" +.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 512 +.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 38126307 +.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 22308716 +.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 15816448 +.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5232008 +.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 512 +.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 38113176 +.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 22313398 +.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 15798779 +.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5237315 +.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191 +.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 824 +.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105 +.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196654 +.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 2052 +.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 50296 +.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 35 +.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 898 +.1.3.6.1.4.1.35450.34.22.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.23.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.24.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 51 +.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 53219328 +.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 51 +.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 53351083` + "`" + ` func main() { fmt.Println(output) @@ -81,31 +81,31 @@ package main import "fmt" -const output = ` + "`" + `iso.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1" -iso.3.6.1.4.1.35450.34.2.0 = Gauge32: 465 -iso.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335 -iso.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415 -iso.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011 -iso.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855 
-iso.3.6.1.4.1.35450.34.7.0 = Gauge32: 465 -iso.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426 -iso.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358 -iso.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002 -iso.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125 -iso.3.6.1.4.1.35450.34.12.0 = Gauge32: 191 -iso.3.6.1.4.1.35450.34.13.0 = Gauge32: 827 -iso.3.6.1.4.1.35450.34.14.0 = Gauge32: 0 -iso.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105 -iso.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650 -iso.3.6.1.4.1.35450.34.17.0 = Gauge32: 0 -iso.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256 -iso.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158 -iso.3.6.1.4.1.35450.34.20.0 = Gauge32: 34 -iso.3.6.1.4.1.35450.34.21.0 = Gauge32: 1 -iso.3.6.1.4.1.35450.34.31.0 = Gauge32: 53 -iso.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240 -iso.3.6.1.4.1.35450.34.33.0 = Gauge32: 53 -iso.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" + +const output = ` + "`" + `.1.3.6.1.4.1.35450.34.1.0 = STRING: "gateway_0@127.0.0.1" +.1.3.6.1.4.1.35450.34.2.0 = Gauge32: 465 +.1.3.6.1.4.1.35450.34.3.0 = Gauge32: 61676335 +.1.3.6.1.4.1.35450.34.4.0 = Gauge32: 46890415 +.1.3.6.1.4.1.35450.34.5.0 = Gauge32: 14785011 +.1.3.6.1.4.1.35450.34.6.0 = Gauge32: 5578855 +.1.3.6.1.4.1.35450.34.7.0 = Gauge32: 465 +.1.3.6.1.4.1.35450.34.8.0 = Gauge32: 61644426 +.1.3.6.1.4.1.35450.34.9.0 = Gauge32: 46880358 +.1.3.6.1.4.1.35450.34.10.0 = Gauge32: 14763002 +.1.3.6.1.4.1.35450.34.11.0 = Gauge32: 5582125 +.1.3.6.1.4.1.35450.34.12.0 = Gauge32: 191 +.1.3.6.1.4.1.35450.34.13.0 = Gauge32: 827 +.1.3.6.1.4.1.35450.34.14.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.15.0 = Gauge32: 50105 +.1.3.6.1.4.1.35450.34.16.0 = Gauge32: 196650 +.1.3.6.1.4.1.35450.34.17.0 = Gauge32: 0 +.1.3.6.1.4.1.35450.34.18.0 = Gauge32: 30256 +.1.3.6.1.4.1.35450.34.19.0 = Gauge32: 532158 +.1.3.6.1.4.1.35450.34.20.0 = Gauge32: 34 +.1.3.6.1.4.1.35450.34.21.0 = Gauge32: 1 +.1.3.6.1.4.1.35450.34.31.0 = Gauge32: 53 +.1.3.6.1.4.1.35450.34.32.0 = Gauge32: 55050240 +.1.3.6.1.4.1.35450.34.33.0 = Gauge32: 53 
+.1.3.6.1.4.1.35450.34.34.0 = Gauge32: 55186538` + "`" + ` func main() { fmt.Println(output) From 0f9f757da7d0a432d3e9e2df3aff2f969037b726 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 12 Oct 2017 17:26:58 -0700 Subject: [PATCH 36/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9946b2a21..325512d8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ ### Bugfixes - [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input. +- [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input. ## v1.4.2 [2017-10-10] From bf9f94eb9da63bee4168b7d4f3c72862c4797ad0 Mon Sep 17 00:00:00 2001 From: Adam Johnson Date: Fri, 13 Oct 2017 19:04:40 +0100 Subject: [PATCH 37/95] Fix cloudwatch output requires unneeded permissions (#3335) --- plugins/outputs/cloudwatch/README.md | 2 ++ plugins/outputs/cloudwatch/cloudwatch.go | 16 ++++++++-------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index 5544b25c7..c44ac4ead 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -13,6 +13,8 @@ API endpoint. In the following order the plugin will attempt to authenticate. 5. [Shared Credentials](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk#shared-credentials-file) 6. [EC2 Instance Profile](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) +The IAM user needs only the `cloudwatch:PutMetricData` permission. 
+ ## Config For this output plugin to function correctly the following variables diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index a04e86cde..b14953dbe 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/sts" "github.com/influxdata/telegraf" internalaws "github.com/influxdata/telegraf/internal/config/aws" @@ -71,21 +72,20 @@ func (c *CloudWatch) Connect() error { } configProvider := credentialConfig.Credentials() - svc := cloudwatch.New(configProvider) + stsService := sts.New(configProvider) - params := &cloudwatch.ListMetricsInput{ - Namespace: aws.String(c.Namespace), - } + params := &sts.GetSessionTokenInput{} - _, err := svc.ListMetrics(params) // Try a read-only call to test connection. + _, err := stsService.GetSessionToken(params) if err != nil { - log.Printf("E! cloudwatch: Error in ListMetrics API call : %+v \n", err.Error()) + log.Printf("E! cloudwatch: Cannot use credentials to connect to AWS : %+v \n", err.Error()) + return err } - c.svc = svc + c.svc = cloudwatch.New(configProvider) - return err + return nil } func (c *CloudWatch) Close() error { From 5cd3327d5f84524f5eb2820a4daaf94259f3492c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Oct 2017 11:12:27 -0700 Subject: [PATCH 38/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 325512d8e..efaeb4476 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload. - [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock. 
+- [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions. ## v1.4.3 [unreleased] From 6b67fedfdc340085a3e10963c49cbc91033e0a50 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Oct 2017 11:30:30 -0700 Subject: [PATCH 39/95] Remove timing sensitive riemann test --- plugins/outputs/riemann/riemann_test.go | 51 ------------------------- 1 file changed, 51 deletions(-) diff --git a/plugins/outputs/riemann/riemann_test.go b/plugins/outputs/riemann/riemann_test.go index 0b7c85403..61b7b3796 100644 --- a/plugins/outputs/riemann/riemann_test.go +++ b/plugins/outputs/riemann/riemann_test.go @@ -1,14 +1,11 @@ package riemann import ( - "fmt" "testing" "time" "github.com/amir/raidman" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -162,51 +159,3 @@ func TestStateEvents(t *testing.T) { } require.Equal(t, expectedEvent, events[0]) } - -func TestConnectAndWrite(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - r := &Riemann{ - URL: fmt.Sprintf("tcp://%s:5555", testutil.GetLocalHost()), - TTL: 15.0, - Separator: "/", - MeasurementAsAttribute: false, - StringAsState: true, - DescriptionText: "metrics from telegraf", - Tags: []string{"docker"}, - } - - err := r.Connect() - require.NoError(t, err) - - err = r.Write(testutil.MockMetrics()) - require.NoError(t, err) - - metrics := make([]telegraf.Metric, 0) - metrics = append(metrics, testutil.TestMetric(2)) - metrics = append(metrics, testutil.TestMetric(3.456789)) - metrics = append(metrics, testutil.TestMetric(uint(0))) - metrics = append(metrics, testutil.TestMetric("ok")) - metrics = append(metrics, testutil.TestMetric("running")) - err = r.Write(metrics) - require.NoError(t, err) - - // are there any "docker" tagged events in Riemann? 
- events, err := r.client.Query(`tagged "docker"`) - require.NoError(t, err) - require.NotZero(t, len(events)) - - // get Riemann events with state = "running", should be 1 event - events, err = r.client.Query(`state = "running"`) - require.NoError(t, err) - require.Len(t, events, 1) - - // is event as expected? - require.Equal(t, []string{"docker", "value1"}, events[0].Tags) - require.Equal(t, "running", events[0].State) - require.Equal(t, "test1/value", events[0].Service) - require.Equal(t, "metrics from telegraf", events[0].Description) - require.Equal(t, map[string]string{"tag1": "value1"}, events[0].Attributes) -} From a1796989f71626a37a75d830b3a24ef9e2b0153b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 13 Oct 2017 13:53:18 -0700 Subject: [PATCH 40/95] Add ipmi_sensor permission documentation --- plugins/inputs/ipmi_sensor/README.md | 73 ++++++++++++++++------------ 1 file changed, 42 insertions(+), 31 deletions(-) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index a1f6c8129..90abf393a 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -1,8 +1,7 @@ -# Telegraf ipmi plugin +# IPMI Sensor Input Plugin -Get bare metal metrics using the command line utility `ipmitool` - -see ipmitool(https://sourceforge.net/projects/ipmitool/files/ipmitool/) +Get bare metal metrics using the command line utility +[`ipmitool`](https://sourceforge.net/projects/ipmitool/files/ipmitool/). If no servers are specified, the plugin will query the local machine sensor stats via the following command: @@ -16,18 +15,7 @@ When one or more servers are specified, the plugin will use the following comman ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ``` -## Measurements - -- ipmi_sensor: - - * Tags: `name`, `unit` - * Fields: - - status - - value - -The `server` tag will be made available when retrieving stats from remote server(s). 
- -## Configuration +### Configuration ```toml # Read metrics from the bare metal servers via IPMI @@ -52,26 +40,49 @@ The `server` tag will be made available when retrieving stats from remote server timeout = "20s" ``` -## Output +### Measurements + +- ipmi_sensor: + - tags: + - name + - unit + - server (only when retrieving stats from remote servers) + - fields: + - status (int) + - value (float) + + +#### Permissions + +When gathering from the local system, Telegraf will need permission to the +impi device node. When using udev you can create the device node giving +`rw` permissions to the `telegraf` user by adding the following rule to +`/etc/udev/rules.d/52-telegraf-ipmi.rules`: + +``` +KERNEL=="ipmi*", MODE="660", GROUP="telegraf" +``` + +### Example Output When retrieving stats from a remote server: ``` -> ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -> ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -> ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -> ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -> ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 +ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613 +ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 +ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 +ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat 
status=1i,value=3.04 1458488465013072508 +ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 +ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 ``` When retrieving stats from the local machine (no server specified): ``` -> ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -> ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -> ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -> ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -> ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -> ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -> ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 +ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613 +ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 +ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 +ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 +ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 +ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 ``` From 86f19dee2bb9d4880ed595bbd397bfe774cfc4b2 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Oct 2017 11:10:06 -0700 Subject: [PATCH 41/95] Fix typo in ipmi_sensor readme --- plugins/inputs/ipmi_sensor/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 90abf393a..820af50a4 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -55,7 +55,7 @@ ipmitool -I lan -H SERVER -U 
USERID -P PASSW0RD sdr #### Permissions When gathering from the local system, Telegraf will need permission to the -impi device node. When using udev you can create the device node giving +ipmi device node. When using udev you can create the device node giving `rw` permissions to the `telegraf` user by adding the following rule to `/etc/udev/rules.d/52-telegraf-ipmi.rules`: From 1f348037b77264424c65e8390f856180777507d5 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Oct 2017 11:26:16 -0700 Subject: [PATCH 42/95] Fix case sensitivity issue in sqlserver query (#3336) --- plugins/inputs/sqlserver/sqlserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 373e12f7c..cdf8ef4f2 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -245,10 +245,10 @@ UNION ALL SELECT 'Average pending disk IO', AveragePendingDiskIOCount = (SELECT AVG(pending_disk_io_count) FROM sys.dm_os_schedulers WITH (NOLOCK) WHERE scheduler_id < 255 ) UNION ALL SELECT 'Buffer pool rate (bytes/sec)', BufferPoolRate = (1.0*cntr_value * 8 * 1024) / - (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND lower(counter_name) = 'Page life expectancy') + (SELECT 1.0*cntr_value FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' AND counter_name = 'Page life expectancy') FROM sys.dm_os_performance_counters WHERE object_name like '%Buffer Manager%' -AND counter_name = 'database pages' +AND counter_name = 'Database pages' UNION ALL SELECT 'Memory grant pending', MemoryGrantPending = cntr_value FROM sys.dm_os_performance_counters From 3ea41e885c59a14714ba0c1530fb6d6cb3a6574c Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Oct 2017 11:27:00 -0700 Subject: [PATCH 43/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index efaeb4476..d74c507ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ - [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input. - [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input. +- [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query. ## v1.4.2 [2017-10-10] From 246ffab3e08ba48d764d28cc29a2104f0b63de3a Mon Sep 17 00:00:00 2001 From: Craig Wickesser Date: Mon, 16 Oct 2017 17:18:36 -0400 Subject: [PATCH 44/95] Add UDP IPv6 support to statsd input (#3344) --- plugins/inputs/statsd/README.md | 2 +- plugins/inputs/statsd/statsd.go | 23 ++++++++++++----------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 69d77580d..9562b9362 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -5,7 +5,7 @@ ```toml # Statsd Server [[inputs.statsd]] - ## Protocol, must be "tcp" or "udp" (default=udp) + ## Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp) protocol = "udp" ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index db412b549..93819cb09 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -171,7 +171,7 @@ func (_ *Statsd) Description() string { } const sampleConfig = ` - ## Protocol, must be "tcp" or "udp" (default=udp) + ## Protocol, must be "tcp", "udp", "udp4" or "udp6" (default=udp) protocol = "udp" ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) @@ -327,10 +327,9 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { s.wg.Add(2) // Start the UDP listener - switch s.Protocol { - case "udp": + if s.isUDP() { go s.udpListen() - case "tcp": + } else { go s.tcpListen() } // Start the line 
parser @@ -382,8 +381,8 @@ func (s *Statsd) tcpListen() error { func (s *Statsd) udpListen() error { defer s.wg.Done() var err error - address, _ := net.ResolveUDPAddr("udp", s.ServiceAddress) - s.UDPlistener, err = net.ListenUDP("udp", address) + address, _ := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress) + s.UDPlistener, err = net.ListenUDP(s.Protocol, address) if err != nil { log.Fatalf("ERROR: ListenUDP - %s", err) } @@ -825,10 +824,9 @@ func (s *Statsd) Stop() { s.Lock() log.Println("I! Stopping the statsd service") close(s.done) - switch s.Protocol { - case "udp": + if s.isUDP() { s.UDPlistener.Close() - case "tcp": + } else { s.TCPlistener.Close() // Close all open TCP connections // - get all conns from the s.conns map and put into slice @@ -843,8 +841,6 @@ func (s *Statsd) Stop() { for _, conn := range conns { conn.Close() } - default: - s.UDPlistener.Close() } s.Unlock() @@ -856,6 +852,11 @@ func (s *Statsd) Stop() { s.Unlock() } +// IsUDP returns true if the protocol is UDP, false otherwise. +func (s *Statsd) isUDP() bool { + return strings.HasPrefix(s.Protocol, "udp") +} + func init() { inputs.Add("statsd", func() telegraf.Input { return &Statsd{ From 4b05edea536ceabe871bae50e1ef4ead24b1a737 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Oct 2017 14:19:16 -0700 Subject: [PATCH 45/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d74c507ec..a66808fc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ - [#3269](https://github.com/influxdata/telegraf/pull/3269): Add cluster health level configuration to elasticsearch input. - [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input. - [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator. +- [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input. 
### Bugfixes From f5a9d1bc75802493779f06eaf88bac69bbc9571c Mon Sep 17 00:00:00 2001 From: Pierre Fersing Date: Mon, 16 Oct 2017 23:25:00 +0200 Subject: [PATCH 46/95] Fix CPU system plugin gets stuck after system suspend (#3342) --- CHANGELOG.md | 1 + plugins/inputs/system/cpu.go | 5 ++- plugins/inputs/system/cpu_test.go | 69 +++++++++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a66808fc4..cebe378fb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ - [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload. - [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock. - [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions. +- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux. ## v1.4.3 [unreleased] diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/system/cpu.go index 55378c93e..99fa451b3 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/system/cpu.go @@ -96,7 +96,8 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { totalDelta := total - lastTotal if totalDelta < 0 { - return fmt.Errorf("Error: current total CPU time is less than previous total CPU time") + err = fmt.Errorf("Error: current total CPU time is less than previous total CPU time") + break } if totalDelta == 0 { @@ -126,7 +127,7 @@ func (s *CPUStats) Gather(acc telegraf.Accumulator) error { s.lastStats[cts.CPU] = cts } - return nil + return err } func totalCpuTime(t cpu.TimesStat) float64 { diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/system/cpu_test.go index fabff8a7d..773f8e7d1 100644 --- a/plugins/inputs/system/cpu_test.go +++ b/plugins/inputs/system/cpu_test.go @@ -184,3 +184,72 @@ func TestCPUCountIncrease(t *testing.T) { err = cs.Gather(&acc) 
require.NoError(t, err) } + +// TestCPUTimesDecrease tests that telegraf continues to work after +// CPU times decrease, which seems to occur when Linux system is suspended. +func TestCPUTimesDecrease(t *testing.T) { + var mps MockPS + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + cts := cpu.TimesStat{ + CPU: "cpu0", + User: 18, + Idle: 80, + Iowait: 2, + } + + cts2 := cpu.TimesStat{ + CPU: "cpu0", + User: 38, // increased by 20 + Idle: 40, // decreased by 40 + Iowait: 1, // decreased by 1 + } + + cts3 := cpu.TimesStat{ + CPU: "cpu0", + User: 56, // increased by 18 + Idle: 120, // increased by 80 + Iowait: 3, // increased by 2 + } + + mps.On("CPUTimes").Return([]cpu.TimesStat{cts}, nil) + + cs := NewCPUStats(&mps) + + cputags := map[string]string{ + "cpu": "cpu0", + } + + err := cs.Gather(&acc) + require.NoError(t, err) + + // Computed values are checked with delta > 0 because of floating point arithmetic + // imprecision + assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags) + + mps2 := MockPS{} + mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) + cs.ps = &mps2 + + // CPU times decreased.
An error should be raised + err = cs.Gather(&acc) + require.Error(t, err) + + mps3 := MockPS{} + mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil) + cs.ps = &mps3 + + err = cs.Gather(&acc) + require.NoError(t, err) + + assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 56, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 120, 0, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 3, 0, cputags) + + assertContainsTaggedFloat(t, &acc, "cpu", "usage_user", 18, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_idle", 80, 0.0005, cputags) + assertContainsTaggedFloat(t, &acc, "cpu", "usage_iowait", 2, 0.0005, cputags) +} From 9d8e935734a4ce1b2ae59ecb1748394754d6d5c3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 16 Oct 2017 14:26:12 -0700 Subject: [PATCH 47/95] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cebe378fb..3c1524c64 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,7 +50,6 @@ - [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload. - [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock. - [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions. -- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux. ## v1.4.3 [unreleased] @@ -59,6 +58,7 @@ - [#3327](https://github.com/influxdata/telegraf/issues/3327): Fix container name filters in docker input. - [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input. - [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query. +- [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux. 
## v1.4.2 [2017-10-10] From 2208657d73f8ca9b9c854ce8fdf598868bbfe1f6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 17 Oct 2017 10:43:53 -0700 Subject: [PATCH 48/95] Add release date info to FAQ --- docs/FAQ.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/FAQ.md b/docs/FAQ.md index d756bb2f3..58396cbcd 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -20,3 +20,8 @@ If running as a service add the environment variable to `/etc/default/telegraf`: ``` GODEBUG=netdns=cgo ``` + +### Q: When will the next version be released? + +The latest release date estimate can be viewed on the +[milestones](https://github.com/influxdata/telegraf/milestones) page. From a75ab3e190234fbc7335d0bac0088f719b54d054 Mon Sep 17 00:00:00 2001 From: Ayrdrie Date: Wed, 18 Oct 2017 15:24:30 -0400 Subject: [PATCH 49/95] Fix mongodb input panic when restarting mongodb (#3355) --- plugins/inputs/mongodb/mongostat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index cb7710798..a2e270bbb 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -538,7 +538,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) } - if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { + if newStat.Metrics != nil && newStat.Metrics.TTL != nil && oldStat.Metrics != nil && oldStat.Metrics.TTL != nil { returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs) } From 7ba376964cb1e532dac0784dd091f89bf6b1031d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 12:25:46 -0700 Subject: [PATCH 50/95] Update changelog --- CHANGELOG.md | 1 + 1 file 
changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c1524c64..0530aa679 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ - [#3321](https://github.com/influxdata/telegraf/issues/3321): Fix snmpwalk address format in leofs input. - [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query. - [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux. +- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb. ## v1.4.2 [2017-10-10] From a4fa19252f2a6345b45380fbc59ce9a463d74cc8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 12:47:58 -0700 Subject: [PATCH 51/95] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0530aa679..dcd1b7a8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,7 +26,6 @@ - [#3191](https://github.com/influxdata/telegraf/pull/3191): TLS and MTLS enhancements to HTTPListener input plugin. - [#3213](https://github.com/influxdata/telegraf/pull/3213): Add polling method to logparser and tail inputs. - [#3211](https://github.com/influxdata/telegraf/pull/3211): Add timeout option for kubernetes input. -- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output. - [#3234](https://github.com/influxdata/telegraf/pull/3234): Add support for timing sums in statsd input. - [#2617](https://github.com/influxdata/telegraf/issues/2617): Add resource limit monitoring to procstat. - [#3236](https://github.com/influxdata/telegraf/pull/3236): Add support for k8s service DNS discovery to prometheus input. @@ -60,6 +59,7 @@ - [#3329](https://github.com/influxdata/telegraf/issues/3329): Fix case sensitivity issue in sqlserver query. - [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux. 
- [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb. +- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output. ## v1.4.2 [2017-10-10] From 02baa696c3775529369c44f6209f824799dbcc71 Mon Sep 17 00:00:00 2001 From: clheikes <24684947+clheikes@users.noreply.github.com> Date: Wed, 18 Oct 2017 15:57:32 -0500 Subject: [PATCH 52/95] Fix TELEGRAF_OPTS expansion in systemd service unit (#3354) --- scripts/telegraf.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/telegraf.service b/scripts/telegraf.service index cbbdc872d..ff9860d5c 100644 --- a/scripts/telegraf.service +++ b/scripts/telegraf.service @@ -6,7 +6,7 @@ After=network.target [Service] EnvironmentFile=-/etc/default/telegraf User=telegraf -ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d ${TELEGRAF_OPTS} +ExecStart=/usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/telegraf/telegraf.d $TELEGRAF_OPTS ExecReload=/bin/kill -HUP $MAINPID Restart=on-failure RestartForceExitStatus=SIGPIPE From 9b59cdd10e05ecb6d404674398c311bb4a5006a3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 13:57:58 -0700 Subject: [PATCH 53/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dcd1b7a8f..8e0350fb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,7 @@ - [#3342](https://github.com/influxdata/telegraf/pull/3342): Fix CPU input plugin stuck after suspend on Linux. - [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb. - [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output. +- [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit. 
## v1.4.2 [2017-10-10] From 6e5915c59f17c31ae63797dcc4e0bc609b10cd1f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 14:51:08 -0700 Subject: [PATCH 54/95] Fix prometheus passthrough for existing value types (#3351) --- plugins/inputs/prometheus/parser.go | 13 +++- plugins/inputs/prometheus/prometheus.go | 10 ++- .../prometheus_client/prometheus_client.go | 42 ++++++++-- .../prometheus_client_test.go | 76 +++++++++++++++---- 4 files changed, 120 insertions(+), 21 deletions(-) diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index ac5c608f6..0807d7e7a 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -86,7 +86,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } else { t = time.Now() } - metric, err := metric.New(metricName, tags, fields, t) + metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType())) if err == nil { metrics = append(metrics, metric) } @@ -97,6 +97,17 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { return metrics, err } +func valueType(mt dto.MetricType) telegraf.ValueType { + switch mt { + case dto.MetricType_COUNTER: + return telegraf.Counter + case dto.MetricType_GAUGE: + return telegraf.Gauge + default: + return telegraf.Untyped + } +} + // Get Quantiles from summary metric func makeQuantiles(m *dto.Metric) map[string]interface{} { fields := make(map[string]interface{}) diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 7c3943bd0..5445a12a3 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -218,7 +218,15 @@ func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) erro if url.Address != "" { tags["address"] = url.Address } - acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) + + switch metric.Type() { + case telegraf.Counter: + 
acc.AddCounter(metric.Name(), metric.Fields(), tags, metric.Time()) + case telegraf.Gauge: + acc.AddGauge(metric.Name(), metric.Fields(), tags, metric.Time()) + default: + acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) + } } return nil diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 92addf9c0..48b625513 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "net/http" + "os" "regexp" "sort" "strings" @@ -15,6 +16,7 @@ import ( "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" ) var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) @@ -46,6 +48,7 @@ type PrometheusClient struct { Listen string ExpirationInterval internal.Duration `toml:"expiration_interval"` Path string `toml:"path"` + CollectorsExclude []string `toml:"collectors_exclude"` server *http.Server @@ -62,11 +65,26 @@ var sampleConfig = ` ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration # expiration_interval = "60s" + + ## Collectors to enable, valid entries are "gocollector" and "process". + ## If unset, both are enabled. 
+ collectors_exclude = ["gocollector", "process"] ` func (p *PrometheusClient) Start() error { prometheus.Register(p) + for _, collector := range p.CollectorsExclude { + switch collector { + case "gocollector": + prometheus.Unregister(prometheus.NewGoCollector()) + case "process": + prometheus.Unregister(prometheus.NewProcessCollector(os.Getpid(), "")) + default: + return fmt.Errorf("unrecognized collector %s", collector) + } + } + if p.Listen == "" { p.Listen = "localhost:9273" } @@ -76,7 +94,9 @@ func (p *PrometheusClient) Start() error { } mux := http.NewServeMux() - mux.Handle(p.Path, prometheus.Handler()) + mux.Handle(p.Path, promhttp.HandlerFor( + prometheus.DefaultGatherer, + promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError})) p.server = &http.Server{ Addr: p.Listen, @@ -243,10 +263,22 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { // Special handling of value field; supports passthrough from // the prometheus input. var mname string - if fn == "value" { - mname = sanitize(point.Name()) - } else { - mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) + switch point.Type() { + case telegraf.Counter: + if fn == "counter" { + mname = sanitize(point.Name()) + } + case telegraf.Gauge: + if fn == "gauge" { + mname = sanitize(point.Name()) + } + } + if mname == "" { + if fn == "value" { + mname = sanitize(point.Name()) + } else { + mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) + } } var fam *MetricFamily diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index 767f9a878..a997c0401 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -107,21 +107,69 @@ func TestWrite_SkipNonNumberField(t *testing.T) { require.False(t, ok) } -func TestWrite_Counter(t *testing.T) { - client := NewClient() +func TestWrite_Counters(t *testing.T) { + type args struct 
{ + measurement string + tags map[string]string + fields map[string]interface{} + valueType telegraf.ValueType + } + var tests = []struct { + name string + args args + err error + metricName string + promType prometheus.ValueType + }{ + { + name: "field named value is not added to metric name", + args: args{ + measurement: "foo", + fields: map[string]interface{}{"value": 42}, + valueType: telegraf.Counter, + }, + metricName: "foo", + promType: prometheus.CounterValue, + }, + { + name: "field named counter is not added to metric name", + args: args{ + measurement: "foo", + fields: map[string]interface{}{"counter": 42}, + valueType: telegraf.Counter, + }, + metricName: "foo", + promType: prometheus.CounterValue, + }, + { + name: "field with any other name is added to metric name", + args: args{ + measurement: "foo", + fields: map[string]interface{}{"other": 42}, + valueType: telegraf.Counter, + }, + metricName: "foo_other", + promType: prometheus.CounterValue, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m, err := metric.New( + tt.args.measurement, + tt.args.tags, + tt.args.fields, + time.Now(), + tt.args.valueType, + ) + client := NewClient() + err = client.Write([]telegraf.Metric{m}) + require.Equal(t, tt.err, err) - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 42}, - time.Now(), - telegraf.Counter) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, prometheus.CounterValue, fam.ValueType) + fam, ok := client.fam[tt.metricName] + require.True(t, ok) + require.Equal(t, tt.promType, fam.ValueType) + }) + } } func TestWrite_Sanitize(t *testing.T) { From adb1f5588c13b94464339b43cb61f080e287c525 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 14:53:34 -0700 Subject: [PATCH 55/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 8e0350fb1..6667bcaf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#3136](https://github.com/influxdata/telegraf/issues/3136): Fix webhooks input address in use during reload. - [#3258](https://github.com/influxdata/telegraf/issues/3258): Unlock Statsd when stopping to prevent deadlock. - [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions. +- [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types. ## v1.4.3 [unreleased] From ec4efe5b035d7123e977b721c3022e23350496f6 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 17:42:30 -0700 Subject: [PATCH 56/95] Use labels in prometheus output for string fields (#3350) --- .../prometheus_client/prometheus_client.go | 9 ++++++ .../prometheus_client_test.go | 28 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index 48b625513..d7702a062 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -242,6 +242,15 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { labels[sanitize(k)] = v } + // Prometheus doesn't have a string value type, so convert string + // fields to labels. + for fn, fv := range point.Fields() { + switch fv := fv.(type) { + case string: + labels[sanitize(fn)] = fv + } + } + for fn, fv := range point.Fields() { // Ignore string and bool fields. 
var value float64 diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index a997c0401..1bb1cc83a 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -324,6 +324,34 @@ func TestWrite_Tags(t *testing.T) { require.True(t, now.Before(sample2.Expiration)) } +func TestWrite_StringFields(t *testing.T) { + now := time.Now() + p1, err := metric.New( + "foo", + make(map[string]string), + map[string]interface{}{"value": 1.0, "status": "good"}, + now, + telegraf.Counter) + p2, err := metric.New( + "bar", + make(map[string]string), + map[string]interface{}{"status": "needs numeric field"}, + now, + telegraf.Gauge) + var metrics = []telegraf.Metric{p1, p2} + + client := NewClient() + err = client.Write(metrics) + require.NoError(t, err) + + fam, ok := client.fam["foo"] + require.True(t, ok) + require.Equal(t, 1, fam.LabelSet["status"]) + + fam, ok = client.fam["bar"] + require.False(t, ok) +} + func TestExpire(t *testing.T) { client := NewClient() From 0e6a70b1997a848abcd3ed604a8ce0e275465398 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 18 Oct 2017 17:43:01 -0700 Subject: [PATCH 57/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6667bcaf1..25cc8d14c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ - [#3304](https://github.com/influxdata/telegraf/pull/3304): Add ability to limit node stats in elasticsearch input. - [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator. - [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input. +- [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields. 
### Bugfixes From 3bbc2beeeda21c0ea4ba5a3fbe8f8a4b2ee84d88 Mon Sep 17 00:00:00 2001 From: Mamat Rahmat Date: Fri, 20 Oct 2017 04:47:40 +0700 Subject: [PATCH 58/95] Fix small typo in documentation (#3364) --- docs/WINDOWS_SERVICE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index c50a10785..2b4d4f0e2 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -38,7 +38,7 @@ Telegraf can manage its own service through the --service flag: | `telegraf.exe --service stop` | Stop the telegraf service | -Trobleshooting common error #1067 +Troubleshooting common error #1067 When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start From 424340343248db788dbeb969407d27c8e3072d7d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 19 Oct 2017 16:25:58 -0700 Subject: [PATCH 59/95] Remove warning when JSON contains null value (#3359) --- plugins/parsers/json/parser.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 50b3d3682..ecf9996af 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -159,8 +159,6 @@ func (f *JSONFlattener) FullFlattenJSON( return nil } case nil: - // ignored types - fmt.Println("json parser ignoring " + fieldname) return nil default: return fmt.Errorf("JSON Flattener: got unexpected type %T with value %v (%s)", From 7088d98304455a9ee7b772c3216ec14c12326e04 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 19 Oct 2017 16:27:29 -0700 Subject: [PATCH 60/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25cc8d14c..34c2241f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ - [#3013](https://github.com/influxdata/telegraf/issues/3013): Fix mongodb input panic when restarting mongodb. 
- [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output. - [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit. +- [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value. ## v1.4.2 [2017-10-10] From 4deb6238a376e76296d8ef354524eaa93d685632 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 19 Oct 2017 16:36:32 -0700 Subject: [PATCH 61/95] Add support for decimal timestamps to ts-epoch modifier (#3358) --- plugins/inputs/logparser/README.md | 15 ++++- plugins/inputs/logparser/grok/grok.go | 28 +++++++-- plugins/inputs/logparser/grok/grok_test.go | 71 ++++++++++++++++++++++ 3 files changed, 108 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index f823cfe68..2febb8194 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -100,7 +100,7 @@ current time. 
- ts-rfc3339 ("2006-01-02T15:04:05Z07:00") - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") - ts-httpd ("02/Jan/2006:15:04:05 -0700") - - ts-epoch (seconds since unix epoch) + - ts-epoch (seconds since unix epoch, may contain decimal) - ts-epochnano (nanoseconds since unix epoch) - ts-"CUSTOM" @@ -130,6 +130,19 @@ This example input and config parses a file using a custom timestamp conversion: patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] ``` +This example input and config parses a file using a timestamp in unix time: + +``` +1466004605 value=42 +1466004605.123456789 value=42 +``` + +```toml +[[inputs.logparser]] + [inputs.logparser.grok] + patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] +``` + This example parses a file using a built-in conversion and a custom pattern: ``` diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/inputs/logparser/grok/grok.go index 0be18b54c..491a13748 100644 --- a/plugins/inputs/logparser/grok/grok.go +++ b/plugins/inputs/logparser/grok/grok.go @@ -253,12 +253,30 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case STRING: fields[k] = strings.Trim(v, `"`) case EPOCH: - iv, err := strconv.ParseInt(v, 10, 64) - if err != nil { - log.Printf("E! Error parsing %s to int: %s", v, err) - } else { - timestamp = time.Unix(iv, 0) + parts := strings.SplitN(v, ".", 2) + if len(parts) == 0 { + log.Printf("E! Error parsing %s to timestamp: %s", v, err) + break } + + sec, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + log.Printf("E! Error parsing %s to timestamp: %s", v, err) + break + } + ts := time.Unix(sec, 0) + + if len(parts) == 2 { + padded := fmt.Sprintf("%-9s", parts[1]) + nsString := strings.Replace(padded[:9], " ", "0", -1) + nanosec, err := strconv.ParseInt(nsString, 10, 64) + if err != nil { + log.Printf("E! 
Error parsing %s to timestamp: %s", v, err) + break + } + ts = ts.Add(time.Duration(nanosec) * time.Nanosecond) + } + timestamp = ts case EPOCH_NANO: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/inputs/logparser/grok/grok_test.go index 6d07b6eca..480502d6c 100644 --- a/plugins/inputs/logparser/grok/grok_test.go +++ b/plugins/inputs/logparser/grok/grok_test.go @@ -385,6 +385,77 @@ func TestParseEpoch(t *testing.T) { assert.Equal(t, time.Unix(1466004605, 0), metricA.Time()) } +func TestParseEpochDecimal(t *testing.T) { + var tests = []struct { + name string + line string + noMatch bool + err error + tags map[string]string + fields map[string]interface{} + time time.Time + }{ + { + name: "ns precision", + line: "1466004605.359052000 value=42", + tags: map[string]string{}, + fields: map[string]interface{}{ + "value": int64(42), + }, + time: time.Unix(0, 1466004605359052000), + }, + { + name: "ms precision", + line: "1466004605.359 value=42", + tags: map[string]string{}, + fields: map[string]interface{}{ + "value": int64(42), + }, + time: time.Unix(0, 1466004605359000000), + }, + { + name: "second precision", + line: "1466004605 value=42", + tags: map[string]string{}, + fields: map[string]interface{}{ + "value": int64(42), + }, + time: time.Unix(0, 1466004605000000000), + }, + { + name: "sub ns precision", + line: "1466004605.123456789123 value=42", + tags: map[string]string{}, + fields: map[string]interface{}{ + "value": int64(42), + }, + time: time.Unix(0, 1466004605123456789), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := &Parser{ + Patterns: []string{"%{NUMBER:ts:ts-epoch} value=%{NUMBER:value:int}"}, + } + assert.NoError(t, parser.Compile()) + m, err := parser.ParseLine(tt.line) + + if tt.noMatch { + require.Nil(t, m) + require.Nil(t, err) + return + } + + require.Equal(t, tt.err, err) + + require.NotNil(t, m) + require.Equal(t, tt.tags, m.Tags()) + 
require.Equal(t, tt.fields, m.Fields()) + require.Equal(t, tt.time, m.Time()) + }) + } +} + func TestParseEpochErrors(t *testing.T) { p := &Parser{ Patterns: []string{"%{MYAPP}"}, From 77cc071796afd41fd14601c74f71f448dd7b0c07 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 19 Oct 2017 17:06:14 -0700 Subject: [PATCH 62/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 34c2241f1..37b1938e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ - [#2167](https://github.com/influxdata/telegraf/pull/2167): Add new basicstats aggregator. - [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input. - [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields. +- [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier. ### Bugfixes From d2f9fc7d8cf8618fcc4aa6d01b1cb13e5499bade Mon Sep 17 00:00:00 2001 From: Sergei Smolianinov Date: Mon, 23 Oct 2017 22:31:27 +0300 Subject: [PATCH 63/95] Fix ACL token usage in consul input plugin (#3376) --- plugins/inputs/consul/consul.go | 4 ++++ plugins/inputs/consul/consul_test.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go index 0eaa25604..45612a928 100644 --- a/plugins/inputs/consul/consul.go +++ b/plugins/inputs/consul/consul.go @@ -69,6 +69,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) { config.Datacenter = c.Datacentre } + if c.Token != "" { + config.Token = c.Token + } + if c.Username != "" { config.HttpAuth = &api.HttpBasicAuth{ Username: c.Username, diff --git a/plugins/inputs/consul/consul_test.go b/plugins/inputs/consul/consul_test.go index d0595508d..bbb43d066 100644 --- a/plugins/inputs/consul/consul_test.go +++ b/plugins/inputs/consul/consul_test.go @@ -20,7 +20,7 @@ var sampleChecks = 
[]*api.HealthCheck{ }, } -func TestGatherHealtCheck(t *testing.T) { +func TestGatherHealthCheck(t *testing.T) { expectedFields := map[string]interface{}{ "check_name": "foo.health", "status": "passing", From 65580759fc48f8074f2a827337f31637f5738016 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 23 Oct 2017 12:36:31 -0700 Subject: [PATCH 64/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 37b1938e3..bca294cac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ - [#3224](https://github.com/influxdata/telegraf/pull/3224): Preserve url path prefix in influx output. - [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit. - [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value. +- [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin. ## v1.4.2 [2017-10-10] From 6d1777276c449da76cec83b927998043f1523f8f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Oct 2017 15:36:23 -0700 Subject: [PATCH 65/95] If the connector name cannot be unquoted, use the raw value (#3371) --- plugins/inputs/tomcat/tomcat.go | 2 +- plugins/inputs/tomcat/tomcat_test.go | 66 ++++++++++++++++++++++++++-- 2 files changed, 63 insertions(+), 5 deletions(-) diff --git a/plugins/inputs/tomcat/tomcat.go b/plugins/inputs/tomcat/tomcat.go index b9dcc731d..dd3c03ce3 100644 --- a/plugins/inputs/tomcat/tomcat.go +++ b/plugins/inputs/tomcat/tomcat.go @@ -165,7 +165,7 @@ func (s *Tomcat) Gather(acc telegraf.Accumulator) error { for _, c := range status.TomcatConnectors { name, err := strconv.Unquote(c.Name) if err != nil { - return fmt.Errorf("Unable to unquote name '%s': %s", c.Name, err) + name = c.Name } tccTags := map[string]string{ diff --git a/plugins/inputs/tomcat/tomcat_test.go b/plugins/inputs/tomcat/tomcat_test.go index 8bdddd6d0..5e206ab83 
100644 --- a/plugins/inputs/tomcat/tomcat_test.go +++ b/plugins/inputs/tomcat/tomcat_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" ) -var tomcatStatus = ` +var tomcatStatus8 = ` @@ -37,10 +37,10 @@ var tomcatStatus = ` ` -func TestHTTPTomcat(t *testing.T) { +func TestHTTPTomcat8(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, tomcatStatus) + fmt.Fprintln(w, tomcatStatus8) })) defer ts.Close() @@ -91,5 +91,63 @@ func TestHTTPTomcat(t *testing.T) { "name": "http-apr-8080", } acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags) - +} + +var tomcatStatus6 = ` + + + + + + + + + + + + + +` + +func TestHTTPTomcat6(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, tomcatStatus6) + })) + defer ts.Close() + + tc := Tomcat{ + URL: ts.URL, + Username: "tomcat", + Password: "s3cret", + } + + var acc testutil.Accumulator + err := tc.Gather(&acc) + require.NoError(t, err) + + // tomcat_jvm_memory + jvmMemoryFields := map[string]interface{}{ + "free": int64(1942681600), + "total": int64(2040070144), + "max": int64(2040070144), + } + acc.AssertContainsFields(t, "tomcat_jvm_memory", jvmMemoryFields) + + // tomcat_connector + connectorFields := map[string]interface{}{ + "bytes_received": int64(0), + "bytes_sent": int64(550196), + "current_thread_count": int64(2), + "current_threads_busy": int64(2), + "error_count": int(16), + "max_threads": int64(150), + "max_time": int(1005), + "processing_time": int(2465), + "request_count": int(436), + } + connectorTags := map[string]string{ + "name": "http-8080", + } + acc.AssertContainsTaggedFields(t, "tomcat_connector", connectorFields, connectorTags) } From f64cf89db10ffa3c3562b9a2d46263357209b032 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Oct 2017 15:46:47 -0700 
Subject: [PATCH 66/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bca294cac..f1461363b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ - [#3354](https://github.com/influxdata/telegraf/pull/3354): Fix TELEGRAF_OPTS expansion in systemd service unit. - [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value. - [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin. +- [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6. ## v1.4.2 [2017-10-10] From 9a062498e73cfff6cb8b1e826832b6cf2990d1da Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Oct 2017 16:22:31 -0700 Subject: [PATCH 67/95] Use golang.org/x/sys/unix instead of syscall in diskio (#3384) --- plugins/inputs/system/disk.go | 23 +++++++++--------- plugins/inputs/system/disk_linux.go | 36 +++++++++++++++-------------- 2 files changed, 30 insertions(+), 29 deletions(-) diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/system/disk.go index ddacbab46..3cc99de05 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/system/disk.go @@ -2,6 +2,7 @@ package system import ( "fmt" + "log" "regexp" "strings" @@ -166,14 +167,13 @@ func (s *DiskIOStats) Gather(acc telegraf.Accumulator) error { var varRegex = regexp.MustCompile(`\$(?:\w+|\{\w+\})`) func (s *DiskIOStats) diskName(devName string) string { - di, err := s.diskInfo(devName) - if err != nil { - // discard error :-( - // We can't return error because it's non-fatal to the Gather(). - // And we have no logger, so we can't log it. + if len(s.NameTemplates) == 0 { return devName } - if di == nil { + + di, err := s.diskInfo(devName) + if err != nil { + log.Printf("W! 
Error gathering disk info: %s", err) return devName } @@ -200,14 +200,13 @@ func (s *DiskIOStats) diskName(devName string) string { } func (s *DiskIOStats) diskTags(devName string) map[string]string { - di, err := s.diskInfo(devName) - if err != nil { - // discard error :-( - // We can't return error because it's non-fatal to the Gather(). - // And we have no logger, so we can't log it. + if len(s.DeviceTags) == 0 { return nil } - if di == nil { + + di, err := s.diskInfo(devName) + if err != nil { + log.Printf("W! Error gathering disk info: %s", err) return nil } diff --git a/plugins/inputs/system/disk_linux.go b/plugins/inputs/system/disk_linux.go index e5a0cff55..d3fd691c4 100644 --- a/plugins/inputs/system/disk_linux.go +++ b/plugins/inputs/system/disk_linux.go @@ -5,25 +5,26 @@ import ( "fmt" "os" "strings" - "syscall" + + "golang.org/x/sys/unix" ) type diskInfoCache struct { - stat syscall.Stat_t - values map[string]string + udevDataPath string + values map[string]string } var udevPath = "/run/udev/data" func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) { - fi, err := os.Stat("/dev/" + devName) + var err error + var stat unix.Stat_t + + path := "/dev/" + devName + err = unix.Stat(path, &stat) if err != nil { return nil, err } - stat, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil, nil - } if s.infoCache == nil { s.infoCache = map[string]diskInfoCache{} @@ -31,25 +32,26 @@ func (s *DiskIOStats) diskInfo(devName string) (map[string]string, error) { ic, ok := s.infoCache[devName] if ok { return ic.values, nil - } else { - ic = diskInfoCache{ - stat: *stat, - values: map[string]string{}, - } - s.infoCache[devName] = ic } - di := ic.values major := stat.Rdev >> 8 & 0xff minor := stat.Rdev & 0xff + udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) - f, err := os.Open(fmt.Sprintf("%s/b%d:%d", udevPath, major, minor)) + di := map[string]string{} + + s.infoCache[devName] = diskInfoCache{ + udevDataPath: udevDataPath, + 
values: di, + } + + f, err := os.Open(udevDataPath) if err != nil { return nil, err } defer f.Close() - scnr := bufio.NewScanner(f) + scnr := bufio.NewScanner(f) for scnr.Scan() { l := scnr.Text() if len(l) < 4 || l[:2] != "E:" { From 13c1f1524ad5d26e1041b19a4b85e7089eae309d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Oct 2017 16:25:49 -0700 Subject: [PATCH 68/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f1461363b..5796ea8f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ - [#3357](https://github.com/influxdata/telegraf/issues/3357): Remove warning when JSON contains null value. - [#3375](https://github.com/influxdata/telegraf/issues/3375): Fix ACL token usage in consul input plugin. - [#3369](https://github.com/influxdata/telegraf/issues/3369): Fix unquoting error with Tomcat 6. +- [#3373](https://github.com/influxdata/telegraf/issues/3373): Fix syscall panic in diskio on some Linux systems. 
## v1.4.2 [2017-10-10] From a6797a44d53c469f97182a25ee346fd83b6a0498 Mon Sep 17 00:00:00 2001 From: Jeremy Doupe Date: Tue, 24 Oct 2017 18:28:52 -0500 Subject: [PATCH 69/95] Add history and summary types to telegraf and prometheus plugins (#3337) --- accumulator.go | 12 + agent/accumulator.go | 22 ++ metric.go | 2 + plugins/inputs/prometheus/parser.go | 8 +- plugins/inputs/prometheus/prometheus.go | 4 + .../prometheus_client/prometheus_client.go | 232 ++++++++++++------ .../prometheus_client_test.go | 120 ++++++++- testutil/accumulator.go | 18 ++ 8 files changed, 334 insertions(+), 84 deletions(-) diff --git a/accumulator.go b/accumulator.go index 13fd6e571..370f0c70c 100644 --- a/accumulator.go +++ b/accumulator.go @@ -28,6 +28,18 @@ type Accumulator interface { tags map[string]string, t ...time.Time) + // AddSummary is the same as AddFields, but will add the metric as a "Summary" type + AddSummary(measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time) + + // AddHistogram is the same as AddFields, but will add the metric as a "Histogram" type + AddHistogram(measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time) + SetPrecision(precision, interval time.Duration) AddError(err error) diff --git a/agent/accumulator.go b/agent/accumulator.go index 1f9e2270d..1fa9b13ee 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -76,6 +76,28 @@ func (ac *accumulator) AddCounter( } } +func (ac *accumulator) AddSummary( + measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time, +) { + if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil { + ac.metrics <- m + } +} + +func (ac *accumulator) AddHistogram( + measurement string, + fields map[string]interface{}, + tags map[string]string, + t ...time.Time, +) { + if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil 
{ + ac.metrics <- m + } +} + // AddError passes a runtime error to the accumulator. // The error will be tagged with the plugin name and written to the log. func (ac *accumulator) AddError(err error) { diff --git a/metric.go b/metric.go index fc479b51d..3fb531358 100644 --- a/metric.go +++ b/metric.go @@ -13,6 +13,8 @@ const ( Counter Gauge Untyped + Summary + Histogram ) type Metric interface { diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 0807d7e7a..6584fbc05 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -103,6 +103,10 @@ func valueType(mt dto.MetricType) telegraf.ValueType { return telegraf.Counter case dto.MetricType_GAUGE: return telegraf.Gauge + case dto.MetricType_SUMMARY: + return telegraf.Summary + case dto.MetricType_HISTOGRAM: + return telegraf.Histogram default: return telegraf.Untyped } @@ -145,11 +149,11 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { fields["gauge"] = float64(m.GetGauge().GetValue()) } } else if m.Counter != nil { - if !math.IsNaN(m.GetGauge().GetValue()) { + if !math.IsNaN(m.GetCounter().GetValue()) { fields["counter"] = float64(m.GetCounter().GetValue()) } } else if m.Untyped != nil { - if !math.IsNaN(m.GetGauge().GetValue()) { + if !math.IsNaN(m.GetUntyped().GetValue()) { fields["value"] = float64(m.GetUntyped().GetValue()) } } diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 5445a12a3..c929a5b26 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -224,6 +224,10 @@ func (p *Prometheus) gatherURL(url UrlAndAddress, acc telegraf.Accumulator) erro acc.AddCounter(metric.Name(), metric.Fields(), tags, metric.Time()) case telegraf.Gauge: acc.AddGauge(metric.Name(), metric.Fields(), tags, metric.Time()) + case telegraf.Summary: + acc.AddSummary(metric.Name(), metric.Fields(), tags, metric.Time()) + case telegraf.Histogram: + 
acc.AddHistogram(metric.Name(), metric.Fields(), tags, metric.Time()) default: acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) } diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index d7702a062..f0b0a7673 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -8,6 +8,7 @@ import ( "os" "regexp" "sort" + "strconv" "strings" "sync" "time" @@ -28,8 +29,13 @@ type SampleID string type Sample struct { // Labels are the Prometheus labels. Labels map[string]string - // Value is the value in the Prometheus output. - Value float64 + // Value is the value in the Prometheus output. Only one of these will populated. + Value float64 + HistogramValue map[float64]uint64 + SummaryValue map[float64]float64 + // Histograms and Summaries need a count and a sum + Count uint64 + Sum float64 // Expiration is the deadline that this Sample is valid until. Expiration time.Time } @@ -38,8 +44,9 @@ type Sample struct { type MetricFamily struct { // Samples are the Sample belonging to this MetricFamily. Samples map[SampleID]*Sample - // Type of the Value. - ValueType prometheus.ValueType + // Need the telegraf ValueType because there isn't a Prometheus ValueType + // representing Histogram or Summary + TelegrafValueType telegraf.ValueType // LabelSet is the label counts for all Samples. LabelSet map[string]int } @@ -189,7 +196,16 @@ func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) { labels = append(labels, v) } - metric, err := prometheus.NewConstMetric(desc, family.ValueType, sample.Value, labels...) + var metric prometheus.Metric + var err error + switch family.TelegrafValueType { + case telegraf.Summary: + metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...) 
+ case telegraf.Histogram: + metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...) + default: + metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...) + } if err != nil { log.Printf("E! Error creating prometheus metric, "+ "key: %s, labels: %v,\nerr: %s\n", @@ -205,7 +221,7 @@ func sanitize(value string) string { return invalidNameCharRE.ReplaceAllString(value, "_") } -func valueType(tt telegraf.ValueType) prometheus.ValueType { +func getPromValueType(tt telegraf.ValueType) prometheus.ValueType { switch tt { case telegraf.Counter: return prometheus.CounterValue @@ -226,6 +242,30 @@ func CreateSampleID(tags map[string]string) SampleID { return SampleID(strings.Join(pairs, ",")) } +func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { + + for k, _ := range sample.Labels { + fam.LabelSet[k]++ + } + + fam.Samples[sampleID] = sample +} + +func (p *PrometheusClient) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) { + var fam *MetricFamily + var ok bool + if fam, ok = p.fam[mname]; !ok { + fam = &MetricFamily{ + Samples: make(map[SampleID]*Sample), + TelegrafValueType: point.Type(), + LabelSet: make(map[string]int), + } + p.fam[mname] = fam + } + + addSample(fam, sample, sampleID) +} + func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { p.Lock() defer p.Unlock() @@ -234,7 +274,6 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { for _, point := range metrics { tags := point.Tags() - vt := valueType(point.Type()) sampleID := CreateSampleID(tags) labels := make(map[string]string) @@ -251,77 +290,128 @@ func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { } } - for fn, fv := range point.Fields() { - // Ignore string and bool fields. 
- var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - sample := &Sample{ - Labels: labels, - Value: value, - Expiration: now.Add(p.ExpirationInterval.Duration), - } - - // Special handling of value field; supports passthrough from - // the prometheus input. + switch point.Type() { + case telegraf.Summary: var mname string - switch point.Type() { - case telegraf.Counter: - if fn == "counter" { - mname = sanitize(point.Name()) - } - case telegraf.Gauge: - if fn == "gauge" { - mname = sanitize(point.Name()) - } - } - if mname == "" { - if fn == "value" { - mname = sanitize(point.Name()) - } else { - mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) - } - } - - var fam *MetricFamily - var ok bool - if fam, ok = p.fam[mname]; !ok { - fam = &MetricFamily{ - Samples: make(map[SampleID]*Sample), - ValueType: vt, - LabelSet: make(map[string]int), - } - p.fam[mname] = fam - } else { - // Metrics can be untyped even though the corresponding plugin - // creates them with a type. This happens when the metric was - // transferred over the network in a format that does not - // preserve value type and received using an input such as a - // queue consumer. To avoid issues we automatically upgrade - // value type from untyped to a typed metric. 
- if fam.ValueType == prometheus.UntypedValue { - fam.ValueType = vt + var sum float64 + var count uint64 + summaryvalue := make(map[float64]float64) + for fn, fv := range point.Fields() { + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case float64: + value = fv + default: + continue } - if vt != prometheus.UntypedValue && fam.ValueType != vt { - // Don't return an error since this would be a permanent error - log.Printf("Mixed ValueType for measurement %q; dropping point", point.Name()) - break + switch fn { + case "sum": + sum = value + case "count": + count = uint64(value) + default: + limit, err := strconv.ParseFloat(fn, 64) + if err == nil { + summaryvalue[limit] = value + } } } - - for k, _ := range sample.Labels { - fam.LabelSet[k]++ + sample := &Sample{ + Labels: labels, + SummaryValue: summaryvalue, + Count: count, + Sum: sum, + Expiration: now.Add(p.ExpirationInterval.Duration), } + mname = sanitize(point.Name()) - fam.Samples[sampleID] = sample + p.addMetricFamily(point, sample, mname, sampleID) + + case telegraf.Histogram: + var mname string + var sum float64 + var count uint64 + histogramvalue := make(map[float64]uint64) + for fn, fv := range point.Fields() { + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + switch fn { + case "sum": + sum = value + case "count": + count = uint64(value) + default: + limit, err := strconv.ParseFloat(fn, 64) + if err == nil { + histogramvalue[limit] = uint64(value) + } + } + } + sample := &Sample{ + Labels: labels, + HistogramValue: histogramvalue, + Count: count, + Sum: sum, + Expiration: now.Add(p.ExpirationInterval.Duration), + } + mname = sanitize(point.Name()) + + p.addMetricFamily(point, sample, mname, sampleID) + + default: + for fn, fv := range point.Fields() { + // Ignore string and bool fields. 
+ var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + sample := &Sample{ + Labels: labels, + Value: value, + Expiration: now.Add(p.ExpirationInterval.Duration), + } + + // Special handling of value field; supports passthrough from + // the prometheus input. + var mname string + switch point.Type() { + case telegraf.Counter: + if fn == "counter" { + mname = sanitize(point.Name()) + } + case telegraf.Gauge: + if fn == "gauge" { + mname = sanitize(point.Name()) + } + } + if mname == "" { + if fn == "value" { + mname = sanitize(point.Name()) + } else { + mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) + } + } + + p.addMetricFamily(point, sample, mname, sampleID) + + } } } return nil diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go index 1bb1cc83a..69509ae1c 100644 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ b/plugins/outputs/prometheus_client/prometheus_client_test.go @@ -9,7 +9,6 @@ import ( "github.com/influxdata/telegraf/metric" prometheus_input "github.com/influxdata/telegraf/plugins/inputs/prometheus" "github.com/influxdata/telegraf/testutil" - "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -45,7 +44,7 @@ func TestWrite_Basic(t *testing.T) { fam, ok := client.fam["foo"] require.True(t, ok) - require.Equal(t, prometheus.UntypedValue, fam.ValueType) + require.Equal(t, telegraf.Untyped, fam.TelegrafValueType) require.Equal(t, map[string]int{}, fam.LabelSet) sample, ok := fam.Samples[CreateSampleID(pt1.Tags())] @@ -119,7 +118,7 @@ func TestWrite_Counters(t *testing.T) { args args err error metricName string - promType prometheus.ValueType + valueType telegraf.ValueType }{ { name: "field named value is not added to metric name", @@ -129,7 +128,7 @@ func TestWrite_Counters(t *testing.T) { valueType: telegraf.Counter, }, 
metricName: "foo", - promType: prometheus.CounterValue, + valueType: telegraf.Counter, }, { name: "field named counter is not added to metric name", @@ -139,7 +138,7 @@ func TestWrite_Counters(t *testing.T) { valueType: telegraf.Counter, }, metricName: "foo", - promType: prometheus.CounterValue, + valueType: telegraf.Counter, }, { name: "field with any other name is added to metric name", @@ -149,7 +148,7 @@ func TestWrite_Counters(t *testing.T) { valueType: telegraf.Counter, }, metricName: "foo_other", - promType: prometheus.CounterValue, + valueType: telegraf.Counter, }, } for _, tt := range tests { @@ -167,7 +166,7 @@ func TestWrite_Counters(t *testing.T) { fam, ok := client.fam[tt.metricName] require.True(t, ok) - require.Equal(t, tt.promType, fam.ValueType) + require.Equal(t, tt.valueType, fam.TelegrafValueType) }) } } @@ -196,20 +195,119 @@ func TestWrite_Sanitize(t *testing.T) { } func TestWrite_Gauge(t *testing.T) { + type args struct { + measurement string + tags map[string]string + fields map[string]interface{} + valueType telegraf.ValueType + } + var tests = []struct { + name string + args args + err error + metricName string + valueType telegraf.ValueType + }{ + { + name: "field named value is not added to metric name", + args: args{ + measurement: "foo", + fields: map[string]interface{}{"value": 42}, + valueType: telegraf.Gauge, + }, + metricName: "foo", + valueType: telegraf.Gauge, + }, + { + name: "field named gauge is not added to metric name", + args: args{ + measurement: "foo", + fields: map[string]interface{}{"gauge": 42}, + valueType: telegraf.Gauge, + }, + metricName: "foo", + valueType: telegraf.Gauge, + }, + { + name: "field with any other name is added to metric name", + args: args{ + measurement: "foo", + fields: map[string]interface{}{"other": 42}, + valueType: telegraf.Gauge, + }, + metricName: "foo_other", + valueType: telegraf.Gauge, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m, err := metric.New( + 
tt.args.measurement, + tt.args.tags, + tt.args.fields, + time.Now(), + tt.args.valueType, + ) + client := NewClient() + err = client.Write([]telegraf.Metric{m}) + require.Equal(t, tt.err, err) + + fam, ok := client.fam[tt.metricName] + require.True(t, ok) + require.Equal(t, tt.valueType, fam.TelegrafValueType) + + }) + } +} + +func TestWrite_Summary(t *testing.T) { client := NewClient() p1, err := metric.New( "foo", make(map[string]string), - map[string]interface{}{"value": 42}, + map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4}, time.Now(), - telegraf.Gauge) + telegraf.Summary) + err = client.Write([]telegraf.Metric{p1}) require.NoError(t, err) fam, ok := client.fam["foo"] require.True(t, ok) - require.Equal(t, prometheus.GaugeValue, fam.ValueType) + require.Equal(t, 1, len(fam.Samples)) + + sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] + require.True(t, ok) + + require.Equal(t, 84.0, sample1.Sum) + require.Equal(t, uint64(42), sample1.Count) + require.Equal(t, 3, len(sample1.SummaryValue)) +} + +func TestWrite_Histogram(t *testing.T) { + client := NewClient() + + p1, err := metric.New( + "foo", + make(map[string]string), + map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4}, + time.Now(), + telegraf.Histogram) + + err = client.Write([]telegraf.Metric{p1}) + require.NoError(t, err) + + fam, ok := client.fam["foo"] + require.True(t, ok) + require.Equal(t, 1, len(fam.Samples)) + + sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] + require.True(t, ok) + + require.Equal(t, 84.0, sample1.Sum) + require.Equal(t, uint64(42), sample1.Count) + require.Equal(t, 3, len(sample1.HistogramValue)) } func TestWrite_MixedValueType(t *testing.T) { @@ -307,7 +405,7 @@ func TestWrite_Tags(t *testing.T) { fam, ok := client.fam["foo"] require.True(t, ok) - require.Equal(t, prometheus.UntypedValue, fam.ValueType) + require.Equal(t, telegraf.Untyped, fam.TelegrafValueType) require.Equal(t, map[string]int{"host": 1}, fam.LabelSet) 
diff --git a/testutil/accumulator.go b/testutil/accumulator.go index c478400eb..29c362c87 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -122,6 +122,24 @@ func (a *Accumulator) AddMetrics(metrics []telegraf.Metric) { } } +func (a *Accumulator) AddSummary( + measurement string, + fields map[string]interface{}, + tags map[string]string, + timestamp ...time.Time, +) { + a.AddFields(measurement, fields, tags, timestamp...) +} + +func (a *Accumulator) AddHistogram( + measurement string, + fields map[string]interface{}, + tags map[string]string, + timestamp ...time.Time, +) { + a.AddFields(measurement, fields, tags, timestamp...) +} + // AddError appends the given error to Accumulator.Errors. func (a *Accumulator) AddError(err error) { if err == nil { From 206397d4755e005712cf3690e843fc3c522ad15b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Tue, 24 Oct 2017 16:31:22 -0700 Subject: [PATCH 70/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5796ea8f9..fa2ce68f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -45,6 +45,7 @@ - [#3344](https://github.com/influxdata/telegraf/pull/3344): Add UDP IPv6 support to statsd input. - [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields. - [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier. +- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins. 
### Bugfixes From 6ea61b55d9ab54e0bb39fb653219960304f0179b Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 25 Oct 2017 14:15:10 -0700 Subject: [PATCH 71/95] Set release date for 1.4.3 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa2ce68f5..debdb71b0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,7 +54,7 @@ - [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions. - [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types. -## v1.4.3 [unreleased] +## v1.4.3 [2017-10-25] ### Bugfixes From a519abf13fad2ea1407f08bee4dce3f9ed039a95 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 25 Oct 2017 15:28:55 -0700 Subject: [PATCH 72/95] Gather concurrently from snmp agents (#3365) --- plugins/inputs/snmp/snmp.go | 69 ++++++++++++++++++-------------- plugins/inputs/snmp/snmp_test.go | 40 ++++++++++++------ 2 files changed, 66 insertions(+), 43 deletions(-) diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 2aef729b3..c4f66f519 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -135,7 +135,7 @@ type Snmp struct { Name string Fields []Field `toml:"field"` - connectionCache map[string]snmpConnection + connectionCache []snmpConnection initialized bool } @@ -144,6 +144,8 @@ func (s *Snmp) init() error { return nil } + s.connectionCache = make([]snmpConnection, len(s.Agents)) + for i := range s.Tables { if err := s.Tables[i].init(); err != nil { return Errorf(err, "initializing table %s", s.Tables[i].Name) @@ -342,30 +344,36 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { return err } - for _, agent := range s.Agents { - gs, err := s.getConnection(agent) - if err != nil { - acc.AddError(Errorf(err, "agent %s", agent)) - continue - } - - // First is the top-level fields. 
We treat the fields as table prefixes with an empty index. - t := Table{ - Name: s.Name, - Fields: s.Fields, - } - topTags := map[string]string{} - if err := s.gatherTable(acc, gs, t, topTags, false); err != nil { - acc.AddError(Errorf(err, "agent %s", agent)) - } - - // Now is the real tables. - for _, t := range s.Tables { - if err := s.gatherTable(acc, gs, t, topTags, true); err != nil { - acc.AddError(Errorf(err, "agent %s: gathering table %s", agent, t.Name)) + var wg sync.WaitGroup + for i, agent := range s.Agents { + wg.Add(1) + go func(i int, agent string) { + defer wg.Done() + gs, err := s.getConnection(i) + if err != nil { + acc.AddError(Errorf(err, "agent %s", agent)) + return } - } + + // First is the top-level fields. We treat the fields as table prefixes with an empty index. + t := Table{ + Name: s.Name, + Fields: s.Fields, + } + topTags := map[string]string{} + if err := s.gatherTable(acc, gs, t, topTags, false); err != nil { + acc.AddError(Errorf(err, "agent %s", agent)) + } + + // Now is the real tables. + for _, t := range s.Tables { + if err := s.gatherTable(acc, gs, t, topTags, true); err != nil { + acc.AddError(Errorf(err, "agent %s: gathering table %s", agent, t.Name)) + } + } + }(i, agent) } + wg.Wait() return nil } @@ -568,16 +576,18 @@ func (gsw gosnmpWrapper) Get(oids []string) (*gosnmp.SnmpPacket, error) { } // getConnection creates a snmpConnection (*gosnmp.GoSNMP) object and caches the -// result using `agent` as the cache key. -func (s *Snmp) getConnection(agent string) (snmpConnection, error) { - if s.connectionCache == nil { - s.connectionCache = map[string]snmpConnection{} - } - if gs, ok := s.connectionCache[agent]; ok { +// result using `agentIndex` as the cache key. This is done to allow multiple +// connections to a single address. It is an error to use a connection in +// more than one goroutine. 
+func (s *Snmp) getConnection(idx int) (snmpConnection, error) { + if gs := s.connectionCache[idx]; gs != nil { return gs, nil } + agent := s.Agents[idx] + gs := gosnmpWrapper{&gosnmp.GoSNMP{}} + s.connectionCache[idx] = gs host, portStr, err := net.SplitHostPort(agent) if err != nil { @@ -677,7 +687,6 @@ func (s *Snmp) getConnection(agent string) (snmpConnection, error) { return nil, Errorf(err, "setting up connection") } - s.connectionCache[agent] = gs return gs, nil } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index 07fdeddc1..f9a7d95e2 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -120,7 +120,7 @@ func TestSampleConfig(t *testing.T) { }, }, } - assert.Equal(t, s, *conf.Inputs.Snmp[0]) + assert.Equal(t, &s, conf.Inputs.Snmp[0]) } func TestFieldInit(t *testing.T) { @@ -251,13 +251,16 @@ func TestSnmpInit_noTranslate(t *testing.T) { func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ + Agents: []string{"1.2.3.4:567", "1.2.3.4"}, Timeout: internal.Duration{Duration: 3 * time.Second}, Retries: 4, Version: 2, Community: "foo", } + err := s.init() + require.NoError(t, err) - gsc, err := s.getConnection("1.2.3.4:567") + gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(gosnmpWrapper) assert.Equal(t, "1.2.3.4", gs.Target) @@ -265,7 +268,7 @@ func TestGetSNMPConnection_v2(t *testing.T) { assert.Equal(t, gosnmp.Version2c, gs.Version) assert.Equal(t, "foo", gs.Community) - gsc, err = s.getConnection("1.2.3.4") + gsc, err = s.getConnection(1) require.NoError(t, err) gs = gsc.(gosnmpWrapper) assert.Equal(t, "1.2.3.4", gs.Target) @@ -274,6 +277,7 @@ func TestGetSNMPConnection_v2(t *testing.T) { func TestGetSNMPConnection_v3(t *testing.T) { s := &Snmp{ + Agents: []string{"1.2.3.4"}, Version: 3, MaxRepetitions: 20, ContextName: "mycontext", @@ -287,8 +291,10 @@ func TestGetSNMPConnection_v3(t *testing.T) { EngineBoots: 1, EngineTime: 2, } + err := s.init() + require.NoError(t, 
err) - gsc, err := s.getConnection("1.2.3.4") + gsc, err := s.getConnection(0) require.NoError(t, err) gs := gsc.(gosnmpWrapper) assert.Equal(t, gs.Version, gosnmp.Version3) @@ -308,15 +314,22 @@ func TestGetSNMPConnection_v3(t *testing.T) { } func TestGetSNMPConnection_caching(t *testing.T) { - s := &Snmp{} - gs1, err := s.getConnection("1.2.3.4") + s := &Snmp{ + Agents: []string{"1.2.3.4", "1.2.3.5", "1.2.3.5"}, + } + err := s.init() require.NoError(t, err) - gs2, err := s.getConnection("1.2.3.4") + gs1, err := s.getConnection(0) require.NoError(t, err) - gs3, err := s.getConnection("1.2.3.5") + gs2, err := s.getConnection(0) + require.NoError(t, err) + gs3, err := s.getConnection(1) + require.NoError(t, err) + gs4, err := s.getConnection(2) require.NoError(t, err) assert.True(t, gs1 == gs2) assert.False(t, gs2 == gs3) + assert.False(t, gs3 == gs4) } func TestGosnmpWrapper_walk_retry(t *testing.T) { @@ -560,11 +573,11 @@ func TestGather(t *testing.T) { }, }, - connectionCache: map[string]snmpConnection{ - "TestGather": tsc, + connectionCache: []snmpConnection{ + tsc, }, + initialized: true, } - acc := &testutil.Accumulator{} tstart := time.Now() @@ -607,9 +620,10 @@ func TestGather_host(t *testing.T) { }, }, - connectionCache: map[string]snmpConnection{ - "TestGather": tsc, + connectionCache: []snmpConnection{ + tsc, }, + initialized: true, } acc := &testutil.Accumulator{} From 5885ef2c1ccea032f346d0b3cc353c56ff2bc4ad Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 25 Oct 2017 15:29:56 -0700 Subject: [PATCH 73/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index debdb71b0..98dfbbf93 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -46,6 +46,7 @@ - [#3350](https://github.com/influxdata/telegraf/pull/3350): Use labels in prometheus output for string fields. - [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier. 
- [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins. +- [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents. ### Bugfixes From 88ec171293585d7eeeb93e774c12f7b04ca98bed Mon Sep 17 00:00:00 2001 From: Vladimir S Date: Thu, 26 Oct 2017 23:35:37 +0300 Subject: [PATCH 74/95] Perform DNS lookup before ping (#3385) --- plugins/inputs/ping/README.md | 7 +++-- plugins/inputs/ping/ping.go | 33 +++++++++++++++++++----- plugins/inputs/ping/ping_test.go | 3 +++ plugins/inputs/ping/ping_windows.go | 32 ++++++++++++++--------- plugins/inputs/ping/ping_windows_test.go | 8 +++++- 5 files changed, 61 insertions(+), 22 deletions(-) diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index 2274f42c9..914477c54 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -28,11 +28,14 @@ urls = ["www.google.com"] # required - packets_received ( from ping output ) - percent_reply_loss ( compute from packets_transmitted and reply_received ) - percent_packets_loss ( compute from packets_transmitted and packets_received ) -- errors ( when host can not be found or wrong prameters is passed to application ) +- errors ( when host can not be found or wrong parameters is passed to application ) - response time - average_response_ms ( compute from minimum_response_ms and maximum_response_ms ) - minimum_response_ms ( from ping output ) - maximum_response_ms ( from ping output ) +- result_code + - 0: success + - 1: no such host ### Tags: @@ -44,5 +47,5 @@ urls = ["www.google.com"] # required ``` $ ./telegraf --config telegraf.conf --input-filter ping --test * Plugin: ping, Collection 1 -ping,host=WIN-PBAPLP511R7,url=www.google.com average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 
+ping,host=WIN-PBAPLP511R7,url=www.google.com result_code=0i,average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index dcbb2c286..cae575bfd 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -5,6 +5,7 @@ package ping import ( "errors" "fmt" + "net" "os/exec" "runtime" "strconv" @@ -76,6 +77,17 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(u string) { defer wg.Done() + tags := map[string]string{"url": u} + fields := map[string]interface{}{"result_code": 0} + + _, err := net.LookupHost(u) + if err != nil { + acc.AddError(err) + fields["result_code"] = 1 + acc.AddFields("ping", fields, tags) + return + } + args := p.args(u) totalTimeout := float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval @@ -99,24 +111,23 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { } else { acc.AddError(err) } + acc.AddFields("ping", fields, tags) return } } - tags := map[string]string{"url": u} trans, rec, min, avg, max, stddev, err := processPingOutput(out) if err != nil { // fatal error acc.AddError(fmt.Errorf("%s: %s", err, u)) + acc.AddFields("ping", fields, tags) return } // Calculate packet loss percentage loss := float64(trans-rec) / float64(trans) * 100.0 - fields := map[string]interface{}{ - "packets_transmitted": trans, - "packets_received": rec, - "percent_packet_loss": loss, - } + fields["packets_transmitted"] = trans + fields["packets_received"] = rec + fields["percent_packet_loss"] = loss if min > 0 { fields["minimum_response_ms"] = min } @@ -194,7 +205,6 @@ func processPingOutput(out string) (int, int, float64, float64, float64, float64 for _, line := range lines { if strings.Contains(line, "transmitted") && strings.Contains(line, "received") { - err = nil stats := strings.Split(line, ", 
") // Transmitted packets trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0]) @@ -209,8 +219,17 @@ func processPingOutput(out string) (int, int, float64, float64, float64, float64 } else if strings.Contains(line, "min/avg/max") { stats := strings.Split(line, " ")[3] min, err = strconv.ParseFloat(strings.Split(stats, "/")[0], 64) + if err != nil { + return trans, recv, min, avg, max, stddev, err + } avg, err = strconv.ParseFloat(strings.Split(stats, "/")[1], 64) + if err != nil { + return trans, recv, min, avg, max, stddev, err + } max, err = strconv.ParseFloat(strings.Split(stats, "/")[2], 64) + if err != nil { + return trans, recv, min, avg, max, stddev, err + } stddev, err = strconv.ParseFloat(strings.Split(stats, "/")[3], 64) if err != nil { return trans, recv, min, avg, max, stddev, err diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 8d422d4b2..eafe89428 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -158,6 +158,7 @@ func TestPingGather(t *testing.T) { "average_response_ms": 43.628, "maximum_response_ms": 51.806, "standard_deviation_ms": 5.325, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) @@ -198,6 +199,7 @@ func TestLossyPingGather(t *testing.T) { "average_response_ms": 44.033, "maximum_response_ms": 51.806, "standard_deviation_ms": 5.325, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) } @@ -230,6 +232,7 @@ func TestBadPingGather(t *testing.T) { "packets_transmitted": 2, "packets_received": 0, "percent_packet_loss": 100.0, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) } diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index 1c83ee84e..9645d175a 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -4,6 +4,7 @@ package ping import ( "errors" + "net" "os/exec" "regexp" "strconv" @@ -158,6 +159,18 @@ func 
(p *Ping) Gather(acc telegraf.Accumulator) error { wg.Add(1) go func(u string) { defer wg.Done() + + tags := map[string]string{"url": u} + fields := map[string]interface{}{"result_code": 0} + + _, err := net.LookupHost(u) + if err != nil { + errorChannel <- err + fields["result_code"] = 1 + acc.AddFields("ping", fields, tags) + return + } + args := p.args(u) totalTimeout := p.timeout() * float64(p.Count) out, err := p.pingHost(totalTimeout, args...) @@ -167,7 +180,6 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { // Combine go err + stderr output pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error()) } - tags := map[string]string{"url": u} trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out) if err != nil { // fatal error @@ -175,24 +187,20 @@ func (p *Ping) Gather(acc telegraf.Accumulator) error { errorChannel <- pendingError } errorChannel <- err - fields := map[string]interface{}{ - "errors": 100.0, - } + fields["errors"] = 100.0 acc.AddFields("ping", fields, tags) - return } // Calculate packet loss percentage lossReply := float64(trans-recReply) / float64(trans) * 100.0 lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0 - fields := map[string]interface{}{ - "packets_transmitted": trans, - "reply_received": recReply, - "packets_received": receivePacket, - "percent_packet_loss": lossPackets, - "percent_reply_loss": lossReply, - } + + fields["packets_transmitted"] = trans + fields["reply_received"] = recReply + fields["packets_received"] = receivePacket + fields["percent_packet_loss"] = lossPackets + fields["percent_reply_loss"] = lossReply if avg > 0 { fields["average_response_ms"] = float64(avg) } diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index aa7bc064b..178e42fcb 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -4,9 +4,10 @@ package ping import ( "errors" + "testing" + 
"github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" - "testing" ) // Windows ping format ( should support multilanguage ?) @@ -81,6 +82,7 @@ func TestPingGather(t *testing.T) { "average_response_ms": 50.0, "minimum_response_ms": 50.0, "maximum_response_ms": 52.0, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) @@ -121,6 +123,7 @@ func TestBadPingGather(t *testing.T) { "reply_received": 0, "percent_packet_loss": 100.0, "percent_reply_loss": 100.0, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) } @@ -167,6 +170,7 @@ func TestLossyPingGather(t *testing.T) { "average_response_ms": 115.0, "minimum_response_ms": 114.0, "maximum_response_ms": 119.0, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) } @@ -269,6 +273,7 @@ func TestUnreachablePingGather(t *testing.T) { "reply_received": 0, "percent_packet_loss": 75.0, "percent_reply_loss": 100.0, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) @@ -315,6 +320,7 @@ func TestTTLExpiredPingGather(t *testing.T) { "reply_received": 0, "percent_packet_loss": 75.0, "percent_reply_loss": 100.0, + "result_code": 0, } acc.AssertContainsTaggedFields(t, "ping", fields, tags) From 8b4708c82a987b490639e3f570d50986c60e57d3 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Thu, 26 Oct 2017 13:37:54 -0700 Subject: [PATCH 75/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 98dfbbf93..06c0a776b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,7 @@ - [#3358](https://github.com/influxdata/telegraf/pull/3358): Add support for decimal timestamps to ts-epoch modifier. - [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins. - [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents. 
+- [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result. ### Bugfixes From ffa8a4a716a82913e10d5ac5f6a5479282fa7fc7 Mon Sep 17 00:00:00 2001 From: Maximilien Richer Date: Fri, 27 Oct 2017 20:53:59 +0200 Subject: [PATCH 76/95] Add instance name option to varnish plugin (#3398) This change add a new configuration option to allow probing of namespaced varnish instances, usually reached using the '-n' switch on the varnish cli. --- plugins/inputs/varnish/varnish.go | 31 +++++++++++++++++--------- plugins/inputs/varnish/varnish_test.go | 17 +++++++------- 2 files changed, 30 insertions(+), 18 deletions(-) diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index 08c885e61..7ceb7d250 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -17,13 +17,14 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, UseSudo bool) (*bytes.Buffer, error) +type runner func(cmdName string, UseSudo bool, InstanceName string) (*bytes.Buffer, error) // Varnish is used to store configuration values type Varnish struct { - Stats []string - Binary string - UseSudo bool + Stats []string + Binary string + UseSudo bool + InstanceName string filter filter.Filter run runner @@ -44,6 +45,10 @@ var sampleConfig = ` ## Glob matching can be used, ie, stats = ["MAIN.*"] ## stats may also be set to ["*"], which will collect all stats stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + + ## Optional name for the varnish instance (or working directory) to query + ## Usually appened after -n in varnish cli + #name = instanceName ` func (s *Varnish) Description() string { @@ -56,8 +61,13 @@ func (s *Varnish) SampleConfig() string { } // Shell out to varnish_stat and return the output -func varnishRunner(cmdName string, UseSudo bool) (*bytes.Buffer, error) { +func varnishRunner(cmdName string, UseSudo bool, InstanceName string) 
(*bytes.Buffer, error) { cmdArgs := []string{"-1"} + + if InstanceName != "" { + cmdArgs = append(cmdArgs, []string{"-n", InstanceName}...) + } + cmd := exec.Command(cmdName, cmdArgs...) if UseSudo { @@ -99,7 +109,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { } } - out, err := s.run(s.Binary, s.UseSudo) + out, err := s.run(s.Binary, s.UseSudo, s.InstanceName) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } @@ -155,10 +165,11 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("varnish", func() telegraf.Input { return &Varnish{ - run: varnishRunner, - Stats: defaultStats, - Binary: defaultBinary, - UseSudo: false, + run: varnishRunner, + Stats: defaultStats, + Binary: defaultBinary, + UseSudo: false, + InstanceName: "", } }) } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index a2b388a0b..30f91e237 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -5,14 +5,15 @@ package varnish import ( "bytes" "fmt" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "strings" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" ) -func fakeVarnishStat(output string, useSudo bool) func(string, bool) (*bytes.Buffer, error) { - return func(string, bool) (*bytes.Buffer, error) { +func fakeVarnishStat(output string, useSudo bool, InstanceName string) func(string, bool, string) (*bytes.Buffer, error) { + return func(string, bool, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -20,7 +21,7 @@ func fakeVarnishStat(output string, useSudo bool) func(string, bool) (*bytes.Buf func TestGather(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(smOutput, false), + run: fakeVarnishStat(smOutput, false, ""), Stats: []string{"*"}, } v.Gather(acc) @@ -36,7 +37,7 @@ func TestGather(t 
*testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true), + run: fakeVarnishStat(fullOutput, true, ""), Stats: []string{"*"}, } err := v.Gather(acc) @@ -51,7 +52,7 @@ func TestParseFullOutput(t *testing.T) { func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, false), + run: fakeVarnishStat(fullOutput, false, ""), Stats: []string{"MGT.*", "VBE.*"}, } err := v.Gather(acc) @@ -74,7 +75,7 @@ func TestFieldConfig(t *testing.T) { for fieldCfg, expected := range expect { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true), + run: fakeVarnishStat(fullOutput, true, ""), Stats: strings.Split(fieldCfg, ","), } err := v.Gather(acc) From 53b13a20d00fdd148e3dcd5c168ee9c1035edeab Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Fri, 27 Oct 2017 11:55:17 -0700 Subject: [PATCH 77/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 06c0a776b..5abd7deee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ - [#3337](https://github.com/influxdata/telegraf/pull/3337): Add histogram and summary types and use in prometheus plugins. - [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents. - [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result. +- [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin. 
### Bugfixes From d9fa916711c66975fb89ed4f38ffb4d7379d9a69 Mon Sep 17 00:00:00 2001 From: Aditya C S Date: Tue, 31 Oct 2017 00:56:39 +0530 Subject: [PATCH 78/95] Update docker plugin README (#3404) --- plugins/inputs/docker/README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md index 158ed7bcc..faee9a57b 100644 --- a/plugins/inputs/docker/README.md +++ b/plugins/inputs/docker/README.md @@ -17,6 +17,11 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.20/) ## To use environment variables (ie, docker-machine), set endpoint = "ENV" endpoint = "unix:///var/run/docker.sock" + ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) + ## Note: configure this in one of the manager nodes in a Swarm cluster. + ## configuring in multiple Swarm managers results in duplication of metrics. + gather_services = false + ## Only collect metrics for these containers. Values will be appended to ## container_name_include. ## Deprecated (1.4.0), use container_name_include @@ -161,6 +166,9 @@ based on the availability of per-cpu stats on your system. - available - total - used +- docker_swarm + - tasks_desired + - tasks_running ### Tags: @@ -191,6 +199,10 @@ based on the availability of per-cpu stats on your system. 
- network - docker_container_blkio specific: - device +- docker_swarm specific: + - service_id + - service_name + - service_mode ### Example Output: @@ -242,4 +254,7 @@ io_service_bytes_recursive_sync=77824i,io_service_bytes_recursive_total=80293888 io_service_bytes_recursive_write=368640i,io_serviced_recursive_async=6562i,\ io_serviced_recursive_read=6492i,io_serviced_recursive_sync=37i,\ io_serviced_recursive_total=6599i,io_serviced_recursive_write=107i 1453409536840126713 -``` +>docker_swarm, +service_id=xaup2o9krw36j2dy1mjx1arjw,service_mode=replicated,service_name=test,\ +tasks_desired=3,tasks_running=3 1508968160000000000 +``` \ No newline at end of file From 23ad959d71f97659344609dc07530551ebdf92d1 Mon Sep 17 00:00:00 2001 From: Aditya C S Date: Tue, 31 Oct 2017 02:22:40 +0530 Subject: [PATCH 79/95] Add support for SSL settings to ElasticSearch output plugin (#3406) --- plugins/outputs/elasticsearch/README.md | 7 ++++ .../outputs/elasticsearch/elasticsearch.go | 35 ++++++++++++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 620d5a82c..b69631ba8 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -174,6 +174,13 @@ This plugin will format the events in the following way: # %H - hour (00..23) index_name = "telegraf-%Y.%m.%d" # required. + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + ## Template Config ## Set to true if you want telegraf to manage its index template. 
## If enabled it will create a recommended index template for telegraf indexes diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index dbd359b90..31a702e55 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -3,15 +3,15 @@ package elasticsearch import ( "context" "fmt" - "log" - "strconv" - "strings" - "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/outputs" "gopkg.in/olivere/elastic.v5" + "log" + "net/http" + "strconv" + "strings" + "time" ) type Elasticsearch struct { @@ -25,6 +25,10 @@ type Elasticsearch struct { ManageTemplate bool TemplateName string OverwriteTemplate bool + SSLCA string `toml:"ssl_ca"` // Path to CA file + SSLCert string `toml:"ssl_cert"` // Path to host cert file + SSLKey string `toml:"ssl_key"` // Path to cert key file + InsecureSkipVerify bool // Use SSL but skip chain & host verification Client *elastic.Client } @@ -56,6 +60,13 @@ var sampleConfig = ` # %H - hour (00..23) index_name = "telegraf-%Y.%m.%d" # required. + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + ## Template Config ## Set to true if you want telegraf to manage its index template. 
## If enabled it will create a recommended index template for telegraf indexes @@ -76,7 +87,21 @@ func (a *Elasticsearch) Connect() error { var clientOptions []elastic.ClientOptionFunc + tlsCfg, err := internal.GetTLSConfig(a.SSLCert, a.SSLKey, a.SSLCA, a.InsecureSkipVerify) + if err != nil { + return err + } + tr := &http.Transport{ + TLSClientConfig: tlsCfg, + } + + httpclient := &http.Client{ + Transport: tr, + Timeout: a.Timeout.Duration, + } + clientOptions = append(clientOptions, + elastic.SetHttpClient(httpclient), elastic.SetSniff(a.EnableSniffer), elastic.SetURL(a.URLs...), elastic.SetHealthcheckInterval(a.HealthCheckInterval.Duration), From 4d5de8698b8f21c420ea0b06bbc713b2c9ddef4d Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Oct 2017 13:53:45 -0700 Subject: [PATCH 80/95] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5abd7deee..ba326b864 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ - [#3365](https://github.com/influxdata/telegraf/pull/3365): Gather concurrently from snmp agents. - [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result. - [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin. +- [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin. 
### Bugfixes From fcfcc803b1f8e0e49bf808ddbdcefb79c926699f Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Oct 2017 15:33:20 -0700 Subject: [PATCH 81/95] Use explicit schemas in mqtt_consumer input (#3401) --- plugins/inputs/mqtt_consumer/README.md | 4 +++- plugins/inputs/mqtt_consumer/mqtt_consumer.go | 20 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index 52990ef76..2889bde59 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -10,7 +10,9 @@ The plugin expects messages in the ```toml # Read metrics from MQTT topic(s) [[inputs.mqtt_consumer]] - servers = ["localhost:1883"] + ## MQTT broker URLs to be used. The format should be scheme://host:port, + ## schema can be tcp, ssl, or ws. + servers = ["tcp://localhost:1883"] ## MQTT QoS, must be 0, 1, or 2 qos = 0 ## Connection timeout for initial connection in seconds diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index ddffbf258..6903f654d 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -56,7 +56,10 @@ type MQTTConsumer struct { } var sampleConfig = ` - servers = ["localhost:1883"] + ## MQTT broker URLs to be used. The format should be scheme://host:port, + ## schema can be tcp, ssl, or ws. 
+ servers = ["tcp://localhost:1883"] + ## MQTT QoS, must be 0, 1, or 2 qos = 0 ## Connection timeout for initial connection in seconds @@ -239,9 +242,7 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { return nil, err } - scheme := "tcp" if tlsCfg != nil { - scheme = "ssl" opts.SetTLSConfig(tlsCfg) } @@ -257,8 +258,17 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { if len(m.Servers) == 0 { return opts, fmt.Errorf("could not get host infomations") } - for _, host := range m.Servers { - server := fmt.Sprintf("%s://%s", scheme, host) + + for _, server := range m.Servers { + // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 + if !strings.Contains(server, "://") { + log.Printf("W! mqtt_consumer server %q should be updated to use `scheme://host:port` format", server) + if tlsCfg == nil { + server = "tcp://" + server + } else { + server = "ssl://" + server + } + } opts.AddBroker(server) } From c116af35c7e625fe6e839e1f77f0ccb19c1708b9 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Oct 2017 15:35:34 -0700 Subject: [PATCH 82/95] Update changelog --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ba326b864..8aee56f33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,10 @@ - [#3319](https://github.com/influxdata/telegraf/issues/3319): Fix cloudwatch output requires unneeded permissions. - [#3351](https://github.com/influxdata/telegraf/issues/3351): Fix prometheus passthrough for existing value types. +## v1.4.4 [unreleased] + +- [#3401](https://github.com/influxdata/telegraf/pull/3401): Use schema specified in mqtt_consumer input. 
+ ## v1.4.3 [2017-10-25] ### Bugfixes From 777b84d1dca12bbe066303ee27d12cf0c9bdf764 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Mon, 30 Oct 2017 16:32:39 -0700 Subject: [PATCH 83/95] Clarify what it means to filter metrics from processors --- docs/AGGREGATORS_AND_PROCESSORS.md | 5 +++++ docs/CONFIGURATION.md | 26 +++++++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index af4a0e6d0..ffa9c8f7e 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -39,6 +39,11 @@ metrics as they pass through Telegraf: Both Aggregators and Processors analyze metrics as they pass through Telegraf. +Use [measurement filtering](CONFIGURATION.md#measurement-filtering) +to control which metrics are passed through a processor or aggregator. If a +metric is filtered out the metric bypasses the plugin and is passed downstream +to the next plugin. + **Processor** plugins process metrics as they pass through and immediately emit results based on the values they process. For example, this could be printing all metrics or adding a tag to all metrics that pass through. diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 6c48d2d84..bf8b4ebf6 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -98,9 +98,13 @@ you can configure that here. * **name_suffix**: Specifies a suffix to attach to the measurement name. * **tags**: A map of tags to apply to a specific input's measurements. +The [measurement filtering](#measurement-filtering) parameters can be used to +limit what metrics are emitted from the input plugin. + ## Output Configuration -There are no generic configuration options available for all outputs. +The [measurement filtering](#measurement-filtering) parameters can be used to +limit what metrics are emitted from the output plugin. 
## Aggregator Configuration @@ -121,6 +125,10 @@ aggregator and will not get sent to the output plugins. * **name_suffix**: Specifies a suffix to attach to the measurement name. * **tags**: A map of tags to apply to a specific input's measurements. +The [measurement filtering](#measurement-filtering) parameters be used to +limit what metrics are handled by the aggregator. Excluded metrics are passed +downstream to the next aggregator. + ## Processor Configuration The following config parameters are available for all processors: @@ -128,6 +136,10 @@ The following config parameters are available for all processors: * **order**: This is the order in which the processor(s) get executed. If this is not specified then processor execution order will be random. +The [measurement filtering](#measurement-filtering) can parameters may be used +to limit what metrics are handled by the processor. Excluded metrics are +passed downstream to the next processor. + #### Measurement Filtering Filters can be configured per input, output, processor, or aggregator, @@ -377,3 +389,15 @@ to the system load metrics due to the `namepass` parameter. 
[[outputs.file]] files = ["stdout"] ``` + +#### Processor Configuration Examples: + +Print only the metrics with `cpu` as the measurement name, all metrics are +passed to the output: +```toml +[[processors.printer]] + namepass = "cpu" + +[[outputs.file]] + files = ["/tmp/metrics.out"] +``` From 63842d48fd41f59bd0eb5038f2926aace2e221f1 Mon Sep 17 00:00:00 2001 From: Maximilien Richer Date: Wed, 1 Nov 2017 00:58:45 +0100 Subject: [PATCH 84/95] Add config to input-varnish README (#3414) --- plugins/inputs/varnish/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 215852d23..01ab7ac24 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -17,6 +17,10 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) ## Setting stats will override the defaults shown below. ## stats may also be set to ["all"], which will collect all stats stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + + ## Optional name for the varnish instance (or working directory) to query + ## Usually appened after -n in varnish cli + #name = instanceName ``` ### Measurements & Fields: From 5f215c22fe2530070a33a7c0baff24b9a92f0ac3 Mon Sep 17 00:00:00 2001 From: Maximilien Richer Date: Wed, 1 Nov 2017 01:00:06 +0100 Subject: [PATCH 85/95] Fix typos in comments (#3415) --- agent/agent.go | 2 +- internal/config/config.go | 4 ++-- metric/parse.go | 2 +- metric/reader_test.go | 4 ++-- plugins/inputs/cloudwatch/cloudwatch.go | 2 +- plugins/inputs/couchbase/couchbase.go | 2 +- plugins/inputs/dns_query/dns_query.go | 2 +- plugins/inputs/ipmi_sensor/ipmi.go | 2 +- plugins/inputs/iptables/iptables_test.go | 2 +- plugins/inputs/mesos/mesos.go | 2 +- plugins/inputs/minecraft/internal/rcon/rcon.go | 2 +- plugins/inputs/minecraft/minecraft.go | 2 +- plugins/inputs/minecraft/rcon.go | 2 +- plugins/inputs/phpfpm/fcgi_client.go | 2 +- 
plugins/inputs/phpfpm/phpfpm_test.go | 6 +++--- plugins/inputs/ping/ping_windows.go | 2 +- plugins/inputs/powerdns/powerdns_test.go | 2 +- plugins/inputs/statsd/statsd.go | 2 +- plugins/inputs/sysstat/sysstat.go | 2 +- plugins/inputs/system/cpu_test.go | 6 +++--- plugins/inputs/win_perf_counters/pdh.go | 2 +- plugins/inputs/win_perf_counters/win_perf_counters.go | 2 +- plugins/outputs/graphite/graphite.go | 2 +- plugins/outputs/influxdb/influxdb.go | 4 ++-- plugins/outputs/opentsdb/opentsdb_test.go | 2 +- plugins/outputs/riemann_legacy/riemann.go | 2 +- 26 files changed, 33 insertions(+), 33 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index a591bc8d1..af96718cd 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -252,7 +252,7 @@ func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, ag // the flusher will flush after metrics are collected. time.Sleep(time.Millisecond * 300) - // create an output metric channel and a gorouting that continously passes + // create an output metric channel and a gorouting that continuously passes // each metric onto the output plugins & aggregators. outMetricC := make(chan telegraf.Metric, 100) var wg sync.WaitGroup diff --git a/internal/config/config.go b/internal/config/config.go index 61263f49a..98d68f9ef 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -126,7 +126,7 @@ type AgentConfig struct { // TODO(cam): Remove UTC and parameter, they are no longer // valid for the agent config. Leaving them here for now for backwards- - // compatability + // compatibility UTC bool `toml:"utc"` // Debug is the option for running in debug mode @@ -683,7 +683,7 @@ func (c *Config) LoadConfig(path string) error { } // trimBOM trims the Byte-Order-Marks from the beginning of the file. -// this is for Windows compatability only. +// this is for Windows compatibility only. 
// see https://github.com/influxdata/telegraf/issues/1378 func trimBOM(f []byte) []byte { return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf")) diff --git a/metric/parse.go b/metric/parse.go index 72057fb39..1acf30094 100644 --- a/metric/parse.go +++ b/metric/parse.go @@ -647,7 +647,7 @@ func skipWhitespace(buf []byte, i int) int { } // makeError is a helper function for making a metric parsing error. -// reason is the reason that the error occured. +// reason is the reason why the error occurred. // buf should be the current buffer we are parsing. // i is the current index, to give some context on where in the buffer we are. func makeError(reason string, buf []byte, i int) error { diff --git a/metric/reader_test.go b/metric/reader_test.go index 645b618eb..1f2ffd6ea 100644 --- a/metric/reader_test.go +++ b/metric/reader_test.go @@ -181,7 +181,7 @@ func TestMetricReader_SplitWithExactLengthSplit(t *testing.T) { } } -// Regresssion test for when a metric requires to be split and one of the +// Regression test for when a metric requires to be split and one of the // split metrics is larger than the buffer. // // Previously the metric index would be set incorrectly causing a panic. @@ -218,7 +218,7 @@ func TestMetricReader_SplitOverflowOversized(t *testing.T) { } } -// Regresssion test for when a split metric exactly fits in the buffer. +// Regression test for when a split metric exactly fits in the buffer. // // Previously the metric would be overflow split when not required. 
func TestMetricReader_SplitOverflowUneeded(t *testing.T) { diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index 733a17640..2fac94ad8 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -92,7 +92,7 @@ func (c *CloudWatch) SampleConfig() string { ## Collection Delay (required - must account for metrics availability via CloudWatch API) delay = "5m" - ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid ## gaps or overlap in pulled data interval = "5m" diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index 35751b727..f773f5d5b 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -21,7 +21,7 @@ var sampleConfig = ` ## http://admin:secret@couchbase-0.example.com:8091/ ## ## If no servers are specified, then localhost is used as the host. - ## If no protocol is specifed, HTTP is used. + ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. 
servers = ["http://localhost:8091"] ` diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 98b3dce73..04ce8328d 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -17,7 +17,7 @@ type DnsQuery struct { // Domains or subdomains to query Domains []string - // Network protocl name + // Network protocol name Network string // Server to query diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index 3b03e62ad..9448208bd 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -35,7 +35,7 @@ var sampleConfig = ` ## # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] - ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid ## gaps or overlap in pulled data interval = "30s" diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index cf2f9f913..a98c24190 100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -81,7 +81,7 @@ func TestIptables_Gather(t *testing.T) { K 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 `}, }, - { // 8 - Multiple rows, multipe chains => no error + { // 8 - Multiple rows, multiple chains => no error table: "filter", chains: []string{"INPUT", "FORWARD"}, values: []string{ diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 26b55c352..e37eabf5d 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -367,7 +367,7 @@ func getMetrics(role Role, group string) []string { ret, ok := m[group] if !ok { - log.Printf("I! [mesos] Unkown %s metrics group: %s\n", role, group) + log.Printf("I! 
[mesos] Unknown %s metrics group: %s\n", role, group) return []string{} } diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index 118771242..a57d75629 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -197,7 +197,7 @@ func (c *Client) Send(typ int32, command string) (response *Packet, err error) { } // NewClient creates a new Client type, creating the connection -// to the server specified by the host and port arguements. If +// to the server specified by the host and port arguments. If // the connection fails, an error is returned. func NewClient(host string, port int) (client *Client, err error) { client = new(Client) diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 04b3f4ad1..6debbd25b 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -47,7 +47,7 @@ func (s *Minecraft) SampleConfig() string { return sampleConfig } -// Gather uses the RCON protocal to collect player and +// Gather uses the RCON protocol to collect player and // scoreboard stats from a minecraft server. //var hasClient bool = false func (s *Minecraft) Gather(acc telegraf.Accumulator) error { diff --git a/plugins/inputs/minecraft/rcon.go b/plugins/inputs/minecraft/rcon.go index 219a1d14f..f42fc8ba4 100644 --- a/plugins/inputs/minecraft/rcon.go +++ b/plugins/inputs/minecraft/rcon.go @@ -76,7 +76,7 @@ func newClient(server, port string) (*rcon.Client, error) { return client, nil } -// Gather recieves all player scoreboard information and returns it per player. +// Gather receives all player scoreboard information and returns it per player. 
func (r *RCON) Gather(producer RCONClientProducer) ([]string, error) { if r.client == nil { var err error diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 56978ad3a..5a4d20019 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -59,7 +59,7 @@ func (client *conn) Request( rec := &record{} var err1 error - // recive untill EOF or FCGI_END_REQUEST + // recive until EOF or FCGI_END_REQUEST READ_LOOP: for { err1 = rec.read(client.rwc) diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index 80a5d4bcf..2f470c72b 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -62,7 +62,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { // Let OS find an available port tcp, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { - t.Fatal("Cannot initalize test server") + t.Fatal("Cannot initialize test server") } defer tcp.Close() @@ -106,7 +106,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) if err != nil { - t.Fatal("Cannot initalize server on port ") + t.Fatal("Cannot initialize server on port ") } defer tcp.Close() @@ -150,7 +150,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) tcp, err := net.Listen("unix", fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber)) if err != nil { - t.Fatal("Cannot initalize server on port ") + t.Fatal("Cannot initialize server on port ") } defer tcp.Close() diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index 9645d175a..862c82c5f 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -175,7 +175,7 @@ func (p *Ping) Gather(acc 
telegraf.Accumulator) error { totalTimeout := p.timeout() * float64(p.Count) out, err := p.pingHost(totalTimeout, args...) // ping host return exitcode != 0 also when there was no response from host - // but command was execute succesfully + // but command was execute successfully if err != nil { // Combine go err + stderr output pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error()) diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index df36440de..56666a886 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -76,7 +76,7 @@ func TestMemcachedGeneratesMetrics(t *testing.T) { binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) if err != nil { - t.Fatal("Cannot initalize server on port ") + t.Fatal("Cannot initialize server on port ") } defer socket.Close() diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 93819cb09..9f711d913 100644 --- a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -66,7 +66,7 @@ type Statsd struct { // MetricSeparator is the separator between parts of the metric name. MetricSeparator string - // This flag enables parsing of tags in the dogstatsd extention to the + // This flag enables parsing of tags in the dogstatsd extension to the // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) ParseDataDogTags bool diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 551cd37aa..1daa85730 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -100,7 +100,7 @@ var sampleConfig = ` # # ## Options for the sadf command. 
The values on the left represent the sadf - ## options and the values on the right their description (wich are used for + ## options and the values on the right their description (which are used for ## grouping and prefixing metrics). ## ## Run 'sar -h' or 'man sar' to find out the supported options for your diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/system/cpu_test.go index 773f8e7d1..43825fca7 100644 --- a/plugins/inputs/system/cpu_test.go +++ b/plugins/inputs/system/cpu_test.go @@ -54,7 +54,7 @@ func TestCPUStats(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 becasue of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmatic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) @@ -105,7 +105,7 @@ func TestCPUStats(t *testing.T) { // specific tags within a certain distance of a given expected value. 
Asserts a failure // if the measurement is of the wrong type, or if no matching measurements are found // -// Paramaters: +// Parameters: // t *testing.T : Testing object to use // acc testutil.Accumulator: Accumulator to examine // measurement string : Name of the measurement to examine @@ -224,7 +224,7 @@ func TestCPUTimesDecrease(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 becasue of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmatic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 2caa21445..a0b693506 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -347,7 +347,7 @@ func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, // // okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character // -// // ommitted all necessary stuff ... +// // omitted all necessary stuff ... 
// // var bufSize uint32 // var bufCount uint32 diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index 5f1c1a11b..7e3991d19 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -110,7 +110,7 @@ func (m *Win_PerfCounters) AddItem(query string, objectName string, counter stri ret = PdhAddEnglishCounter(handle, query, 0, &counterHandle) } - // Call PdhCollectQueryData one time to check existance of the counter + // Call PdhCollectQueryData one time to check existence of the counter ret = PdhCollectQueryData(handle) if ret != ERROR_SUCCESS { PdhCloseQuery(handle) diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index 180217214..53c4bdc1b 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -16,7 +16,7 @@ import ( ) type Graphite struct { - // URL is only for backwards compatability + // URL is only for backwards compatibility Servers []string Prefix string Template string diff --git a/plugins/outputs/influxdb/influxdb.go b/plugins/outputs/influxdb/influxdb.go index 71320d01e..e2b20caa4 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -22,7 +22,7 @@ var ( // InfluxDB struct is the primary data structure for the plugin type InfluxDB struct { - // URL is only for backwards compatability + // URL is only for backwards compatibility URL string URLs []string `toml:"urls"` Username string @@ -100,7 +100,7 @@ func (i *InfluxDB) Connect() error { var urls []string urls = append(urls, i.URLs...) 
- // Backward-compatability with single Influx URL config files + // Backward-compatibility with single Influx URL config files // This could eventually be removed in favor of specifying the urls as a list if i.URL != "" { urls = append(urls, i.URL) diff --git a/plugins/outputs/opentsdb/opentsdb_test.go b/plugins/outputs/opentsdb/opentsdb_test.go index 83a659216..d5d7aa7e9 100644 --- a/plugins/outputs/opentsdb/opentsdb_test.go +++ b/plugins/outputs/opentsdb/opentsdb_test.go @@ -183,7 +183,7 @@ func BenchmarkHttpSend(b *testing.B) { // err = o.Write(testutil.MockMetrics()) // require.NoError(t, err) -// // Verify postive and negative test cases of writing data +// // Verify positive and negative test cases of writing data // metrics := testutil.MockMetrics() // metrics = append(metrics, testutil.TestMetric(float64(1.0), // "justametric.float")) diff --git a/plugins/outputs/riemann_legacy/riemann.go b/plugins/outputs/riemann_legacy/riemann.go index 69de7f521..a1b140436 100644 --- a/plugins/outputs/riemann_legacy/riemann.go +++ b/plugins/outputs/riemann_legacy/riemann.go @@ -84,7 +84,7 @@ func (r *Riemann) Write(metrics []telegraf.Metric) error { var senderr = r.client.SendMulti(events) if senderr != nil { - r.Close() // always retuns nil + r.Close() // always returns nil return fmt.Errorf("FAILED to send riemann message (will try to reconnect). 
Error: %s\n", senderr) } From 76ed70340b98817c7dd15c3323b455adb79b4a81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patric=20Kanngie=C3=9Fer?= Date: Wed, 1 Nov 2017 21:28:00 +0100 Subject: [PATCH 86/95] Add Teamspeak 3 input plugin (#3315) --- Godeps | 2 + docs/LICENSE_OF_DEPENDENCIES.md | 2 + plugins/inputs/all/all.go | 1 + plugins/inputs/teamspeak/README.md | 45 ++++++++++ plugins/inputs/teamspeak/teamspeak.go | 100 +++++++++++++++++++++ plugins/inputs/teamspeak/teamspeak_test.go | 87 ++++++++++++++++++ 6 files changed, 237 insertions(+) create mode 100644 plugins/inputs/teamspeak/README.md create mode 100644 plugins/inputs/teamspeak/teamspeak.go create mode 100644 plugins/inputs/teamspeak/teamspeak_test.go diff --git a/Godeps b/Godeps index 0802675ba..02e20755e 100644 --- a/Godeps +++ b/Godeps @@ -40,6 +40,8 @@ github.com/kballard/go-shellquote d8ec1a69a250a17bb0e419c386eac1f3711dc142 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34 github.com/miekg/dns 99f84ae56e75126dd77e5de4fae2ea034a468ca1 +github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4 +github.com/multiplay/go-ts3 07477f49b8dfa3ada231afc7b7b17617d42afe8e github.com/naoina/go-stringutil 6b638e95a32d0c1131db0e7fe83775cbea4a0d0b github.com/nats-io/go-nats ea9585611a4ab58a205b9b125ebd74c389a6b898 github.com/nats-io/nats ea9585611a4ab58a205b9b125ebd74c389a6b898 diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 3774d34ce..ffd9751d7 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -82,6 +82,8 @@ following works: - github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE) - github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md) - github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt) +- 
github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE) - github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE) - github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE) - github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 1f4d3825c..235169c86 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -86,6 +86,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/system" _ "github.com/influxdata/telegraf/plugins/inputs/tail" _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" _ "github.com/influxdata/telegraf/plugins/inputs/tomcat" _ "github.com/influxdata/telegraf/plugins/inputs/trig" _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" diff --git a/plugins/inputs/teamspeak/README.md b/plugins/inputs/teamspeak/README.md new file mode 100644 index 000000000..84c4297e8 --- /dev/null +++ b/plugins/inputs/teamspeak/README.md @@ -0,0 +1,45 @@ +# Teamspeak 3 Input Plugin + +This plugin uses the Teamspeak 3 ServerQuery interface of the Teamspeak server to collect statistics of one or more +virtual servers. If you are querying an external Teamspeak server, make sure to add the host which is running Telegraf +to query_ip_whitelist.txt in the Teamspeak Server directory. 
For information about how to configure the server take a look +the [Teamspeak 3 ServerQuery Manual](http://media.teamspeak.com/ts3_literature/TeamSpeak%203%20Server%20Query%20Manual.pdf) + +### Configuration: + +``` +# Reads metrics from a Teamspeak 3 Server via ServerQuery +[[inputs.teamspeak]] + ## Server address for Teamspeak 3 ServerQuery + # server = "127.0.0.1:10011" + ## Username for ServerQuery + username = "serverqueryuser" + ## Password for ServerQuery + password = "secret" + ## Array of virtual servers + # virtual_servers = [1] +``` + +### Measurements: + +- teamspeak + - uptime + - clients_online + - total_ping + - total_packet_loss + - packets_sent_total + - packets_received_total + - bytes_sent_total + - bytes_received_total + +### Tags: + +- The following tags are used: + - virtual_server + - name + +### Example output: + +``` +teamspeak,virtual_server=1,name=LeopoldsServer,host=vm01 bytes_received_total=29638202639i,uptime=13567846i,total_ping=26.89,total_packet_loss=0,packets_sent_total=415821252i,packets_received_total=237069900i,bytes_sent_total=55309568252i,clients_online=11i 1507406561000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/teamspeak/teamspeak.go b/plugins/inputs/teamspeak/teamspeak.go new file mode 100644 index 000000000..91fdf1135 --- /dev/null +++ b/plugins/inputs/teamspeak/teamspeak.go @@ -0,0 +1,100 @@ +package teamspeak + +import ( + "github.com/multiplay/go-ts3" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "strconv" +) + +type Teamspeak struct { + Server string + Username string + Password string + VirtualServers []int `toml:"virtual_servers"` + + client *ts3.Client + connected bool +} + +func (ts *Teamspeak) Description() string { + return "Reads metrics from a Teamspeak 3 Server via ServerQuery" +} + +const sampleConfig = ` + ## Server address for Teamspeak 3 ServerQuery + # server = "127.0.0.1:10011" + ## Username for ServerQuery + username = "serverqueryuser" 
+ ## Password for ServerQuery + password = "secret" + ## Array of virtual servers + # virtual_servers = [1] +` + +func (ts *Teamspeak) SampleConfig() string { + return sampleConfig +} + +func (ts *Teamspeak) Gather(acc telegraf.Accumulator) error { + var err error + + if !ts.connected { + ts.client, err = ts3.NewClient(ts.Server) + if err != nil { + return err + } + + err = ts.client.Login(ts.Username, ts.Password) + if err != nil { + return err + } + + ts.connected = true + } + + for _, vserver := range ts.VirtualServers { + ts.client.Use(vserver) + + sm, err := ts.client.Server.Info() + if err != nil { + ts.connected = false + return err + } + + sc, err := ts.client.Server.ServerConnectionInfo() + if err != nil { + ts.connected = false + return err + } + + tags := map[string]string{ + "virtual_server": strconv.Itoa(sm.ID), + "name": sm.Name, + } + + fields := map[string]interface{}{ + "uptime": sm.Uptime, + "clients_online": sm.ClientsOnline, + "total_ping": sm.TotalPing, + "total_packet_loss": sm.TotalPacketLossTotal, + "packets_sent_total": sc.PacketsSentTotal, + "packets_received_total": sc.PacketsReceivedTotal, + "bytes_sent_total": sc.BytesSentTotal, + "bytes_received_total": sc.BytesReceivedTotal, + } + + acc.AddFields("teamspeak", fields, tags) + } + return nil +} + +func init() { + inputs.Add("teamspeak", func() telegraf.Input { + return &Teamspeak{ + Server: "127.0.0.1:10011", + VirtualServers: []int{1}, + } + }) +} diff --git a/plugins/inputs/teamspeak/teamspeak_test.go b/plugins/inputs/teamspeak/teamspeak_test.go new file mode 100644 index 000000000..b66948f28 --- /dev/null +++ b/plugins/inputs/teamspeak/teamspeak_test.go @@ -0,0 +1,87 @@ +package teamspeak + +import ( + "bufio" + "net" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +const welcome = `Welcome to the TeamSpeak 3 ServerQuery interface, type "help" for a list of commands and "help " for information on a specific command.` +const ok = `error id=0 msg=ok` +const 
errorMsg = `error id=256 msg=command\snot\sfound` + +var cmd = map[string]string{ + "login": "", + "use": "", + "serverinfo": `virtualserver_unique_identifier=a1vn9PLF8CMIU virtualserver_name=Testserver virtualserver_welcomemessage=Test virtualserver_platform=Linux virtualserver_version=3.0.13.8\s[Build:\s1500452811] virtualserver_maxclients=32 virtualserver_password virtualserver_clientsonline=2 virtualserver_channelsonline=1 virtualserver_created=1507400243 virtualserver_uptime=148 virtualserver_codec_encryption_mode=0 virtualserver_hostmessage virtualserver_hostmessage_mode=0 virtualserver_filebase=files\/virtualserver_1 virtualserver_default_server_group=8 virtualserver_default_channel_group=8 virtualserver_flag_password=0 virtualserver_default_channel_admin_group=5 virtualserver_max_download_total_bandwidth=18446744073709551615 virtualserver_max_upload_total_bandwidth=18446744073709551615 virtualserver_hostbanner_url virtualserver_hostbanner_gfx_url virtualserver_hostbanner_gfx_interval=0 virtualserver_complain_autoban_count=5 virtualserver_complain_autoban_time=1200 virtualserver_complain_remove_time=3600 virtualserver_min_clients_in_channel_before_forced_silence=100 virtualserver_priority_speaker_dimm_modificator=-18.0000 virtualserver_id=1 virtualserver_antiflood_points_tick_reduce=5 virtualserver_antiflood_points_needed_command_block=150 virtualserver_antiflood_points_needed_ip_block=250 virtualserver_client_connections=1 virtualserver_query_client_connections=1 virtualserver_hostbutton_tooltip virtualserver_hostbutton_url virtualserver_hostbutton_gfx_url virtualserver_queryclientsonline=1 virtualserver_download_quota=18446744073709551615 virtualserver_upload_quota=18446744073709551615 virtualserver_month_bytes_downloaded=0 virtualserver_month_bytes_uploaded=0 virtualserver_total_bytes_downloaded=0 virtualserver_total_bytes_uploaded=0 virtualserver_port=9987 virtualserver_autostart=1 virtualserver_machine_id virtualserver_needed_identity_security_level=8 
virtualserver_log_client=0 virtualserver_log_query=0 virtualserver_log_channel=0 virtualserver_log_permissions=1 virtualserver_log_server=0 virtualserver_log_filetransfer=0 virtualserver_min_client_version=1445512488 virtualserver_name_phonetic virtualserver_icon_id=0 virtualserver_reserved_slots=0 virtualserver_total_packetloss_speech=0.0000 virtualserver_total_packetloss_keepalive=0.0000 virtualserver_total_packetloss_control=0.0000 virtualserver_total_packetloss_total=0.0000 virtualserver_total_ping=1.0000 virtualserver_ip=0.0.0.0,\s:: virtualserver_weblist_enabled=1 virtualserver_ask_for_privilegekey=0 virtualserver_hostbanner_mode=0 virtualserver_channel_temp_delete_delay_default=0 virtualserver_min_android_version=1407159763 virtualserver_min_ios_version=1407159763 virtualserver_status=online connection_filetransfer_bandwidth_sent=0 connection_filetransfer_bandwidth_received=0 connection_filetransfer_bytes_sent_total=0 connection_filetransfer_bytes_received_total=0 connection_packets_sent_speech=0 connection_bytes_sent_speech=0 connection_packets_received_speech=0 connection_bytes_received_speech=0 connection_packets_sent_keepalive=261 connection_bytes_sent_keepalive=10701 connection_packets_received_keepalive=261 connection_bytes_received_keepalive=10961 connection_packets_sent_control=54 connection_bytes_sent_control=15143 connection_packets_received_control=55 connection_bytes_received_control=4239 connection_packets_sent_total=315 connection_bytes_sent_total=25844 connection_packets_received_total=316 connection_bytes_received_total=15200 connection_bandwidth_sent_last_second_total=81 connection_bandwidth_sent_last_minute_total=141 connection_bandwidth_received_last_second_total=83 connection_bandwidth_received_last_minute_total=98`, + "serverrequestconnectioninfo": `connection_filetransfer_bandwidth_sent=0 connection_filetransfer_bandwidth_received=0 connection_filetransfer_bytes_sent_total=0 connection_filetransfer_bytes_received_total=0 
connection_packets_sent_total=369 connection_bytes_sent_total=28058 connection_packets_received_total=370 connection_bytes_received_total=17468 connection_bandwidth_sent_last_second_total=81 connection_bandwidth_sent_last_minute_total=109 connection_bandwidth_received_last_second_total=83 connection_bandwidth_received_last_minute_total=94 connection_connected_time=174 connection_packetloss_total=0.0000 connection_ping=1.0000`, +} + +func TestGather(t *testing.T) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal("Initializing test server failed") + } + defer l.Close() + + go handleRequest(l, t) + + var acc testutil.Accumulator + testConfig := Teamspeak{ + Server: l.Addr().String(), + Username: "serveradmin", + Password: "test", + VirtualServers: []int{1}, + } + err = testConfig.Gather(&acc) + + if err != nil { + t.Fatalf("Gather returned error. Error: %s\n", err) + } + + fields := map[string]interface{}{ + "uptime": int(148), + "clients_online": int(2), + "total_ping": float32(1.0000), + "total_packet_loss": float64(0.0000), + "packets_sent_total": uint64(369), + "packets_received_total": uint64(370), + "bytes_sent_total": uint64(28058), + "bytes_received_total": uint64(17468), + } + + acc.AssertContainsFields(t, "teamspeak", fields) +} + +func handleRequest(l net.Listener, t *testing.T) { + c, err := l.Accept() + if err != nil { + t.Fatal("Error accepting test connection") + } + c.Write([]byte("TS3\n\r" + welcome + "\n\r")) + for { + msg, _, err := bufio.NewReader(c).ReadLine() + if err != nil { + return + } + r, exists := cmd[strings.Split(string(msg), " ")[0]] + + if exists { + switch r { + case "": + c.Write([]byte(ok + "\n\r")) + case "quit": + c.Write([]byte(ok + "\n\r")) + c.Close() + return + default: + c.Write([]byte(r + "\n\r" + ok + "\n\r")) + } + } else { + c.Write([]byte(errorMsg + "\n\r")) + } + } +} From 26ccc1f20518fc153affc69ab9507e5e4c4a31f1 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 1 Nov 2017 13:30:43 -0700 
Subject: [PATCH 87/95] Add teamspeak to readme and update changelog --- CHANGELOG.md | 2 ++ README.md | 1 + 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8aee56f33..f527fdca3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - [jolokia2](./plugins/inputs/jolokia2/README.md) - Thanks to @dylanmei - [nginx_plus](./plugins/inputs/nginx_plus/README.md) - Thanks to @mplonka & @poblahblahblah - [smart](./plugins/inputs/smart/README.md) - Thanks to @rickard-von-essen +- [teamspeak](./plugins/inputs/teamspeak/README.md) - Thanks to @p4ddy1 - [wavefront](./plugins/outputs/wavefront/README.md) - Thanks to @puckpuck ### Release Notes @@ -50,6 +51,7 @@ - [#3333](https://github.com/influxdata/telegraf/issues/3333): Perform DNS lookup before ping and report result. - [#3398](https://github.com/influxdata/telegraf/issues/3398): Add instance name option to varnish plugin. - [#3406](https://github.com/influxdata/telegraf/pull/3406): Add support for SSL settings to ElasticSearch output plugin. +- [#3315](https://github.com/influxdata/telegraf/pull/3315): Add Teamspeak 3 input plugin. ### Bugfixes diff --git a/README.md b/README.md index 62d725083..0f0f85ba2 100644 --- a/README.md +++ b/README.md @@ -196,6 +196,7 @@ configuration options. 
* [snmp](./plugins/inputs/snmp) * [snmp_legacy](./plugins/inputs/snmp_legacy) * [sql server](./plugins/inputs/sqlserver) (microsoft) +* [teamspeak](./plugins/inputs/teamspeak) * [tomcat](./plugins/inputs/tomcat) * [twemproxy](./plugins/inputs/twemproxy) * [varnish](./plugins/inputs/varnish) From 9b0af4478b678fbfa346ec773ecb7638eb9f6ef8 Mon Sep 17 00:00:00 2001 From: Daniel Nelson Date: Wed, 1 Nov 2017 18:17:51 -0700 Subject: [PATCH 88/95] Remove incorrect comment about linker options --- cmd/telegraf/telegraf.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 42fd6cf2f..9e286e362 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -55,9 +55,6 @@ var fUsage = flag.String("usage", "", var fService = flag.String("service", "", "operate on the service") -// Telegraf version, populated linker. -// ie, -ldflags "-X main.version=`git describe --always --tags`" - var ( nextVersion = "1.5.0" version string From ba462f5c9474407160123cda908db7fdb4bb1daa Mon Sep 17 00:00:00 2001 From: "David G. 
Simmons" Date: Mon, 2 Oct 2017 16:50:23 -0400 Subject: [PATCH 89/95] New Particle Plugin --- .txt | 0 plugins/inputs/webhooks/particle/README.md | 26 +++++++++ .../webhooks/particle/particle_webhooks.go | 48 ++++++++++++++++ .../particle/particle_webhooks_events.go | 22 +++++++ .../particle_webhooks_events_json_test.go | 39 +++++++++++++ .../particle/particle_webhooks_test.go | 57 +++++++++++++++++++ plugins/inputs/webhooks/webhooks.go | 5 ++ plugins/inputs/webhooks/webhooks_test.go | 6 ++ 8 files changed, 203 insertions(+) create mode 100644 .txt create mode 100644 plugins/inputs/webhooks/particle/README.md create mode 100644 plugins/inputs/webhooks/particle/particle_webhooks.go create mode 100644 plugins/inputs/webhooks/particle/particle_webhooks_events.go create mode 100644 plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go create mode 100644 plugins/inputs/webhooks/particle/particle_webhooks_test.go diff --git a/.txt b/.txt new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md new file mode 100644 index 000000000..1212b742a --- /dev/null +++ b/plugins/inputs/webhooks/particle/README.md @@ -0,0 +1,26 @@ +# particle webhooks + +You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to `particle.com/` and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and click on `Enable Webhook Integration`. + +## Events + +Your Particle device should publish an event that contains a JSON in the form of: +``` +String data = String::format("{ \"tags\" : { + \"tag_name\": \"tag_value\", + \"other_tag\": \"other_value\" + }, + \"values\": { + \"value_name\": %f, + \"other_value\": %f, + } + }", value_value, other_value + ); + Particle.publish("event_name", data, PRIVATE); +``` +Escaping the "" is required in the source file. 
+The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like. + +You will need to enable JSON messages in the Webhooks setup of Particle.io + +See [webhook doc](https://docs.particle.io/reference/webhooks/) diff --git a/plugins/inputs/webhooks/particle/particle_webhooks.go b/plugins/inputs/webhooks/particle/particle_webhooks.go new file mode 100644 index 000000000..813bd0665 --- /dev/null +++ b/plugins/inputs/webhooks/particle/particle_webhooks.go @@ -0,0 +1,48 @@ +package particle + +import ( + "encoding/json" + "github.com/gorilla/mux" + "github.com/influxdata/telegraf" + "io/ioutil" + "log" + "net/http" + "time" +) + +type ParticleWebhook struct { + Path string + acc telegraf.Accumulator +} + +func (rb *ParticleWebhook) Register(router *mux.Router, acc telegraf.Accumulator) { + router.HandleFunc(rb.Path, rb.eventHandler).Methods("POST") + log.Printf("I! Started the webhooks_particle on %s\n", rb.Path) + rb.acc = acc +} + +func (rb *ParticleWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + data, err := ioutil.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + dummy := &DummyData{} + if err := json.Unmarshal(data, dummy); err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + pd := &ParticleData{} + if err := json.Unmarshal([]byte(dummy.Data), pd); err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + pTime, err := dummy.Time() + if err != nil { + log.Printf("Time Conversion Error") + pTime = time.Now() + } + rb.acc.AddFields(dummy.InfluxDB, pd.Fields, pd.Tags, pTime) + w.WriteHeader(http.StatusOK) +} diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_events.go b/plugins/inputs/webhooks/particle/particle_webhooks_events.go new file mode 100644 index 000000000..089536525 --- /dev/null +++ b/plugins/inputs/webhooks/particle/particle_webhooks_events.go @@ -0,0 +1,22 @@ +package particle + 
+import ( + "time" +) + +type DummyData struct { + Event string `json:"event"` + Data string `json:"data"` + Ttl int `json:"ttl"` + PublishedAt string `json:"published_at"` + InfluxDB string `json:"influx_db"` +} +type ParticleData struct { + Event string `json:"event"` + Tags map[string]string `json:"tags"` + Fields map[string]interface{} `json:"values"` +} + +func (d *DummyData) Time() (time.Time, error) { + return time.Parse("2006-01-02T15:04:05Z", d.PublishedAt) +} diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go new file mode 100644 index 000000000..aef0537e9 --- /dev/null +++ b/plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go @@ -0,0 +1,39 @@ +package particle + +func NewItemJSON() string { + return ` + { + "event": "temperature", + "data": "{ + "tags": { + "id": "230035001147343438323536", + "location": "TravelingWilbury" + }, + "values": { + "temp_c": 26.680000, + "temp_f": 80.024001, + "humidity": 44.937500, + "pressure": 998.998901, + "altitude": 119.331436, + "broadband": 1266, + "infrared": 528, + "lux": 0 + } + }", + "ttl": 60, + "published_at": "2017-09-28T21:54:10.897Z", + "coreid": "123456789938323536", + "userid": "1234ee123ac8e5ec1231a123d", + "version": 10, + "public": false, + "productID": 1234, + "name": "sensor" + "influx_db": "mydata" + }` +} +func UnknowJSON() string { + return ` + { + "event": "roger" + }` +} diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_test.go new file mode 100644 index 000000000..c62e0f0c8 --- /dev/null +++ b/plugins/inputs/webhooks/particle/particle_webhooks_test.go @@ -0,0 +1,57 @@ +package particle + +import ( + "github.com/influxdata/telegraf/testutil" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func postWebhooks(rb *ParticleWebhook, eventBody string) *httptest.ResponseRecorder { + 
req, _ := http.NewRequest("POST", "/", strings.NewReader(eventBody)) + log.Printf("eventBody: %s\n", eventBody) + w := httptest.NewRecorder() + w.Code = 500 + + rb.eventHandler(w, req) + + return w +} + +func TestNewItem(t *testing.T) { + var acc testutil.Accumulator + rb := &ParticleWebhook{Path: "/particle", acc: &acc} + resp := postWebhooks(rb, NewItemJSON()) + log.Printf("Respnse: %s\n", resp.Body) + if resp.Code != http.StatusOK { + t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) + } + + fields := map[string]interface{}{ + "temp_c": 26.680000, + "temp_f": 80.024001, + "humidity": 44.937500, + "pressure": 998.998901, + "altitude": 119.331436, + "broadband": 1266, + "infrared": 528, + "lux": 0, + } + + tags := map[string]string{ + "id": "230035001147343438323536", + "location": "TravelingWilbury", + } + + acc.AssertContainsTaggedFields(t, "particle_webhooks", fields, tags) +} +func TestUnknowItem(t *testing.T) { + rb := &ParticleWebhook{Path: "/particle"} + resp := postWebhooks(rb, UnknowJSON()) + log.Printf("Response: %s\n", resp.Body) + if resp.Code != http.StatusOK { + t.Errorf("POST unknown returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) + } +} diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index 698cde159..794b55168 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -15,6 +15,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/webhooks/github" "github.com/influxdata/telegraf/plugins/inputs/webhooks/mandrill" "github.com/influxdata/telegraf/plugins/inputs/webhooks/papertrail" + "github.com/influxdata/telegraf/plugins/inputs/webhooks/particle" "github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar" ) @@ -34,6 +35,7 @@ type Webhooks struct { Mandrill *mandrill.MandrillWebhook Rollbar *rollbar.RollbarWebhook Papertrail *papertrail.PapertrailWebhook + Particle *particle.ParticleWebhook srv 
*http.Server } @@ -62,6 +64,9 @@ func (wb *Webhooks) SampleConfig() string { [inputs.webhooks.papertrail] path = "/papertrail" + + [inputs.webhooks.particle] + path = "/particle" ` } diff --git a/plugins/inputs/webhooks/webhooks_test.go b/plugins/inputs/webhooks/webhooks_test.go index 6d3448870..1a5fa4aa1 100644 --- a/plugins/inputs/webhooks/webhooks_test.go +++ b/plugins/inputs/webhooks/webhooks_test.go @@ -6,6 +6,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs/webhooks/github" "github.com/influxdata/telegraf/plugins/inputs/webhooks/papertrail" + "github.com/influxdata/telegraf/plugins/inputs/webhooks/particle" "github.com/influxdata/telegraf/plugins/inputs/webhooks/rollbar" ) @@ -33,4 +34,9 @@ func TestAvailableWebhooks(t *testing.T) { if !reflect.DeepEqual(wb.AvailableWebhooks(), expected) { t.Errorf("expected to be %v.\nGot %v", expected, wb.AvailableWebhooks()) } + wb.Particle = &particle.ParticleWebhook{Path: "/particle"} + expected = append(expected, wb.Particle) + if !reflect.DeepEqual(wb.AvailableWebhooks(), expected) { + t.Errorf("expected to be %v.\nGot %v", expected, wb.AvailableWebhooks()) + } } From 86961cc8143e3b2edfaabb97c639030754ea9edc Mon Sep 17 00:00:00 2001 From: David Norton Date: Tue, 3 Oct 2017 09:04:29 -0400 Subject: [PATCH 90/95] bug fixes and refactoring --- .../webhooks/particle/particle_webhooks.go | 61 ++++++++++++------- .../particle/particle_webhooks_events.go | 22 ------- .../particle_webhooks_events_json_test.go | 39 ------------ .../particle/particle_webhooks_test.go | 60 +++++++++++++++--- 4 files changed, 90 insertions(+), 92 deletions(-) delete mode 100644 plugins/inputs/webhooks/particle/particle_webhooks_events.go delete mode 100644 plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go diff --git a/plugins/inputs/webhooks/particle/particle_webhooks.go b/plugins/inputs/webhooks/particle/particle_webhooks.go index 813bd0665..258619856 100644 --- 
a/plugins/inputs/webhooks/particle/particle_webhooks.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks.go @@ -2,14 +2,40 @@ package particle import ( "encoding/json" - "github.com/gorilla/mux" - "github.com/influxdata/telegraf" - "io/ioutil" "log" "net/http" "time" + + "github.com/gorilla/mux" + "github.com/influxdata/telegraf" ) +type event struct { + Name string `json:"event"` + Data data `json:"data"` + TTL int `json:"ttl"` + PublishedAt string `json:"published_at"` + Database string `json:"influx_db"` +} + +type data struct { + Tags map[string]string `json:"tags"` + Fields map[string]interface{} `json:"values"` +} + +func newEvent() *event { + return &event{ + Data: data{ + Tags: make(map[string]string), + Fields: make(map[string]interface{}), + }, + } +} + +func (e *event) Time() (time.Time, error) { + return time.Parse("2006-01-02T15:04:05Z", e.PublishedAt) +} + type ParticleWebhook struct { Path string acc telegraf.Accumulator @@ -23,26 +49,19 @@ func (rb *ParticleWebhook) Register(router *mux.Router, acc telegraf.Accumulator func (rb *ParticleWebhook) eventHandler(w http.ResponseWriter, r *http.Request) { defer r.Body.Close() - data, err := ioutil.ReadAll(r.Body) + e := newEvent() + if err := json.NewDecoder(r.Body).Decode(e); err != nil { + log.Println(err) + w.WriteHeader(http.StatusBadRequest) + return + } + + pTime, err := e.Time() if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - dummy := &DummyData{} - if err := json.Unmarshal(data, dummy); err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - pd := &ParticleData{} - if err := json.Unmarshal([]byte(dummy.Data), pd); err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - pTime, err := dummy.Time() - if err != nil { - log.Printf("Time Conversion Error") pTime = time.Now() + log.Printf("error parsing particle event time: %s. 
Using telegraf host time instead: %s", e.PublishedAt, pTime) } - rb.acc.AddFields(dummy.InfluxDB, pd.Fields, pd.Tags, pTime) + + rb.acc.AddFields(e.Name, e.Data.Fields, e.Data.Tags, pTime) w.WriteHeader(http.StatusOK) } diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_events.go b/plugins/inputs/webhooks/particle/particle_webhooks_events.go deleted file mode 100644 index 089536525..000000000 --- a/plugins/inputs/webhooks/particle/particle_webhooks_events.go +++ /dev/null @@ -1,22 +0,0 @@ -package particle - -import ( - "time" -) - -type DummyData struct { - Event string `json:"event"` - Data string `json:"data"` - Ttl int `json:"ttl"` - PublishedAt string `json:"published_at"` - InfluxDB string `json:"influx_db"` -} -type ParticleData struct { - Event string `json:"event"` - Tags map[string]string `json:"tags"` - Fields map[string]interface{} `json:"values"` -} - -func (d *DummyData) Time() (time.Time, error) { - return time.Parse("2006-01-02T15:04:05Z", d.PublishedAt) -} diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go deleted file mode 100644 index aef0537e9..000000000 --- a/plugins/inputs/webhooks/particle/particle_webhooks_events_json_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package particle - -func NewItemJSON() string { - return ` - { - "event": "temperature", - "data": "{ - "tags": { - "id": "230035001147343438323536", - "location": "TravelingWilbury" - }, - "values": { - "temp_c": 26.680000, - "temp_f": 80.024001, - "humidity": 44.937500, - "pressure": 998.998901, - "altitude": 119.331436, - "broadband": 1266, - "infrared": 528, - "lux": 0 - } - }", - "ttl": 60, - "published_at": "2017-09-28T21:54:10.897Z", - "coreid": "123456789938323536", - "userid": "1234ee123ac8e5ec1231a123d", - "version": 10, - "public": false, - "productID": 1234, - "name": "sensor" - "influx_db": "mydata" - }` -} -func UnknowJSON() string { - return ` - { - 
"event": "roger" - }` -} diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_test.go index c62e0f0c8..eecf26e14 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks_test.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks_test.go @@ -1,17 +1,16 @@ package particle import ( - "github.com/influxdata/telegraf/testutil" - "log" "net/http" "net/http/httptest" "strings" "testing" + + "github.com/influxdata/telegraf/testutil" ) func postWebhooks(rb *ParticleWebhook, eventBody string) *httptest.ResponseRecorder { req, _ := http.NewRequest("POST", "/", strings.NewReader(eventBody)) - log.Printf("eventBody: %s\n", eventBody) w := httptest.NewRecorder() w.Code = 500 @@ -21,10 +20,10 @@ func postWebhooks(rb *ParticleWebhook, eventBody string) *httptest.ResponseRecor } func TestNewItem(t *testing.T) { + t.Parallel() var acc testutil.Accumulator rb := &ParticleWebhook{Path: "/particle", acc: &acc} resp := postWebhooks(rb, NewItemJSON()) - log.Printf("Respnse: %s\n", resp.Body) if resp.Code != http.StatusOK { t.Errorf("POST new_item returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) } @@ -32,12 +31,12 @@ func TestNewItem(t *testing.T) { fields := map[string]interface{}{ "temp_c": 26.680000, "temp_f": 80.024001, + "infrared": 528.0, + "lux": 0.0, "humidity": 44.937500, "pressure": 998.998901, "altitude": 119.331436, - "broadband": 1266, - "infrared": 528, - "lux": 0, + "broadband": 1266.0, } tags := map[string]string{ @@ -45,13 +44,54 @@ func TestNewItem(t *testing.T) { "location": "TravelingWilbury", } - acc.AssertContainsTaggedFields(t, "particle_webhooks", fields, tags) + acc.AssertContainsTaggedFields(t, "temperature", fields, tags) } + func TestUnknowItem(t *testing.T) { - rb := &ParticleWebhook{Path: "/particle"} + t.Parallel() + var acc testutil.Accumulator + rb := &ParticleWebhook{Path: "/particle", acc: &acc} resp := postWebhooks(rb, UnknowJSON()) - 
log.Printf("Response: %s\n", resp.Body) if resp.Code != http.StatusOK { t.Errorf("POST unknown returned HTTP status code %v.\nExpected %v", resp.Code, http.StatusOK) } } + +func NewItemJSON() string { + return ` + { + "event": "temperature", + "data": { + "tags": { + "id": "230035001147343438323536", + "location": "TravelingWilbury" + }, + "values": { + "temp_c": 26.680000, + "temp_f": 80.024001, + "humidity": 44.937500, + "pressure": 998.998901, + "altitude": 119.331436, + "broadband": 1266, + "infrared": 528, + "lux": 0 + } + }, + "ttl": 60, + "published_at": "2017-09-28T21:54:10.897Z", + "coreid": "123456789938323536", + "userid": "1234ee123ac8e5ec1231a123d", + "version": 10, + "public": false, + "productID": 1234, + "name": "sensor", + "influx_db": "mydata" + }` +} + +func UnknowJSON() string { + return ` + { + "event": "roger" + }` +} From 8ed00af10a9a5950850f27a5c196dcc0245166b0 Mon Sep 17 00:00:00 2001 From: "David G. Simmons" Date: Thu, 5 Oct 2017 16:13:19 -0400 Subject: [PATCH 91/95] Update README.md --- plugins/inputs/webhooks/particle/README.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index 1212b742a..d11b6f7fb 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -1,6 +1,15 @@ # particle webhooks -You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to `particle.com/` and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and click on `Enable Webhook Integration`. +You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to `console.particle.io/` and click `Integrations > New Integration > Webhook`. 
In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add: + +``` +{ + "influx_db": "your_database_name" +} +``` + +If required, enter your username and password, etc. and then click `Save` + ## Events @@ -18,9 +27,9 @@ String data = String::format("{ \"tags\" : { ); Particle.publish("event_name", data, PRIVATE); ``` -Escaping the "" is required in the source file. +Escaping the "" is required in the source file on the Particle device. The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like. -You will need to enable JSON messages in the Webhooks setup of Particle.io + See [webhook doc](https://docs.particle.io/reference/webhooks/) From a6ada03b910ffa5ac8bc85ad52a1b789dc99aae6 Mon Sep 17 00:00:00 2001 From: "David G. Simmons" Date: Thu, 5 Oct 2017 16:14:27 -0400 Subject: [PATCH 92/95] Updated README.md --- plugins/inputs/webhooks/particle/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index d11b6f7fb..e7b206b53 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -28,7 +28,7 @@ String data = String::format("{ \"tags\" : { Particle.publish("event_name", data, PRIVATE); ``` Escaping the "" is required in the source file on the Particle device. -The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like. +The number of tag values and field values is not restricted so you can send as many values per webhook call as you'd like. From a987118b010ca3827c1ee8ebe691f6f6bd287ac5 Mon Sep 17 00:00:00 2001 From: "David G. 
Simmons" Date: Thu, 2 Nov 2017 15:18:45 -0400 Subject: [PATCH 93/95] Small fixes Hoping to pass CircleCI this time --- plugins/inputs/webhooks/particle/README.md | 4 ++-- plugins/inputs/webhooks/particle/particle_webhooks_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index e7b206b53..3c345daa8 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -1,10 +1,10 @@ # particle webhooks -You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to `console.particle.io/` and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add: +You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to [console.particle.io](https://console.particle.io/) and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add: ``` { - "influx_db": "your_database_name" + "influx_db": "your_measurement_name" } ``` diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_test.go index eecf26e14..850b2c4fc 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks_test.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks_test.go @@ -74,7 +74,7 @@ func NewItemJSON() string { "altitude": 119.331436, "broadband": 1266, "infrared": 528, - "lux": 0 + "lux": 0.0 } }, "ttl": 60, From 92caf33fff56eb730be62f235ede68d2e833db83 Mon Sep 17 00:00:00 2001 From: "David G. 
Simmons" Date: Thu, 2 Nov 2017 17:23:09 -0400 Subject: [PATCH 94/95] Updated Test JSON --- plugins/inputs/webhooks/particle/particle_webhooks_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/inputs/webhooks/particle/particle_webhooks_test.go b/plugins/inputs/webhooks/particle/particle_webhooks_test.go index 850b2c4fc..53252ff17 100644 --- a/plugins/inputs/webhooks/particle/particle_webhooks_test.go +++ b/plugins/inputs/webhooks/particle/particle_webhooks_test.go @@ -72,8 +72,8 @@ func NewItemJSON() string { "humidity": 44.937500, "pressure": 998.998901, "altitude": 119.331436, - "broadband": 1266, - "infrared": 528, + "broadband": 1266.0, + "infrared": 528.0, "lux": 0.0 } }, From 4596ae70a9063f457c131291d984b77e625852d4 Mon Sep 17 00:00:00 2001 From: "David G. Simmons" Date: Fri, 3 Nov 2017 12:05:17 -0400 Subject: [PATCH 95/95] ignore mac-files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 8269337df..e93bc8dff 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ tivan .idea *~ *# +.DS_Store