Remove usage of deprecated v1beta API endpoints (#6543)
parent 988e036641
commit 47a708ec99

@@ -413,8 +413,7 @@
 packages = [
 ".",
 "apis/apiextensions/v1beta1",
-"apis/apps/v1beta1",
-"apis/apps/v1beta2",
+"apis/apps/v1",
 "apis/core/v1",
 "apis/extensions/v1beta1",
 "apis/meta/v1",
@@ -1712,8 +1711,7 @@
 "github.com/docker/libnetwork/ipvs",
 "github.com/eclipse/paho.mqtt.golang",
 "github.com/ericchiang/k8s",
-"github.com/ericchiang/k8s/apis/apps/v1beta1",
-"github.com/ericchiang/k8s/apis/apps/v1beta2",
+"github.com/ericchiang/k8s/apis/apps/v1",
 "github.com/ericchiang/k8s/apis/core/v1",
 "github.com/ericchiang/k8s/apis/extensions/v1beta1",
 "github.com/ericchiang/k8s/apis/meta/v1",

@@ -1,5 +1,7 @@
 # Kube_Inventory Plugin
+
 This plugin generates metrics derived from the state of the following Kubernetes resources:
+
 - daemonsets
 - deployments
 - nodes
@@ -8,6 +10,12 @@ This plugin generates metrics derived from the state of the following Kubernetes
 - pods (containers)
 - statefulsets
 
+Kubernetes is a fast moving project, with a new minor release every 3 months. As
+such, we will aim to maintain support only for versions that are supported by
+the major cloud providers; this is roughly 4 release / 2 years.
+
+**This plugin supports Kubernetes 1.11 and later.**
+
 #### Series Cardinality Warning
 
 This plugin may produce a high number of series which, when not controlled
@@ -61,6 +69,7 @@ avoid cardinality issues:
 #### Kubernetes Permissions
+
 If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group.
 
 ```yaml
 ---
 kind: ClusterRole
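
The hunks above show only the first lines of the RBAC manifests. For reference, an aggregated ClusterRole setup of the kind the paragraph describes generally looks like the following sketch; the role names and the label key are illustrative, not taken from this change:

```yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: influx:cluster:viewer          # illustrative name
  labels:
    rbac.authorization.k8s.io/aggregate-view-telegraf: "true"  # illustrative label key
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes", "nodes"]
    verbs: ["get", "list"]
---
# The aggregated role picks up any ClusterRole carrying the label above;
# its own rules list stays empty and is filled in by the controller manager.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: influx:telegraf                # illustrative name
aggregationRule:
  clusterRoleSelectors:
    - matchLabels:
        rbac.authorization.k8s.io/aggregate-view-telegraf: "true"
rules: [] # Rules are automatically filled in by the controller manager.
```
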
@@ -89,6 +98,7 @@ rules: [] # Rules are automatically filled in by the controller manager.
 ```
+
 Bind the newly created aggregated ClusterRole with the following config file, updating the subjects as needed.
 
 ```yaml
 ---
 apiVersion: rbac.authorization.k8s.io/v1
@@ -105,10 +115,9 @@ subjects:
 namespace: default
 ```
 
-
 ### Metrics:
 
-+ kubernetes_daemonset
+- kubernetes_daemonset
 - tags:
 - daemonset_name
 - namespace
@@ -122,7 +131,7 @@ subjects:
 - number_unavailable
 - updated_number_scheduled
 
-- kubernetes_deployment
+* kubernetes_deployment
 - tags:
 - deployment_name
 - namespace
@@ -131,7 +140,7 @@ subjects:
 - replicas_unavailable
 - created
 
-+ kubernetes_endpoints
+- kubernetes_endpoints
 - tags:
 - endpoint_name
 - namespace
@@ -139,14 +148,14 @@ subjects:
 - node_name
 - port_name
 - port_protocol
-- kind (*varies)
+- kind (\*varies)
 - fields:
 - created
 - generation
 - ready
 - port
 
-- kubernetes_ingress
+* kubernetes_ingress
 - tags:
 - ingress_name
 - namespace
@@ -161,7 +170,7 @@ subjects:
 - backend_service_port
 - tls
 
-+ kubernetes_node
+- kubernetes_node
 - tags:
 - node_name
 - fields:
@@ -172,7 +181,7 @@ subjects:
 - allocatable_memory_bytes
 - allocatable_pods
 
-- kubernetes_persistentvolume
+* kubernetes_persistentvolume
 - tags:
 - pv_name
 - phase
@@ -180,7 +189,7 @@ subjects:
 - fields:
 - phase_type (int, [see below](#pv-phase_type))
 
-+ kubernetes_persistentvolumeclaim
+- kubernetes_persistentvolumeclaim
 - tags:
 - pvc_name
 - namespace
@@ -189,7 +198,7 @@ subjects:
 - fields:
 - phase_type (int, [see below](#pvc-phase_type))
 
-- kubernetes_pod_container
+* kubernetes_pod_container
 - tags:
 - container_name
 - namespace
@@ -204,7 +213,7 @@ subjects:
 - resource_limits_cpu_units
 - resource_limits_memory_bytes
 
-+ kubernetes_service
+- kubernetes_service
 - tags:
 - service_name
 - namespace
@@ -218,7 +227,7 @@ subjects:
 - port
 - target_port
 
-- kubernetes_statefulset
+* kubernetes_statefulset
 - tags:
 - statefulset_name
 - namespace
@@ -237,7 +246,7 @@
 The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value.
 
 | Tag value | Corresponding field value |
------------|-------------------------|
+| --------- | ------------------------- |
 | bound | 0 |
 | failed | 1 |
 | pending | 2 |
@@ -250,13 +259,12 @@ The persistentvolume "phase" is saved in the `phase` tag with a correlated numer
 The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value.
 
 | Tag value | Corresponding field value |
------------|-------------------------|
+| --------- | ------------------------- |
 | bound | 0 |
 | lost | 1 |
 | pending | 2 |
 | unknown | 3 |
 
-
 ### Example Output:
 
 ```
@@ -271,7 +279,6 @@ kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-
 kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
 ```
 
-
 [metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
 [retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
 [max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000

@@ -5,9 +5,8 @@
 "time"
 
 "github.com/ericchiang/k8s"
-"github.com/ericchiang/k8s/apis/apps/v1beta1"
-"github.com/ericchiang/k8s/apis/apps/v1beta2"
-"github.com/ericchiang/k8s/apis/core/v1"
+v1APPS "github.com/ericchiang/k8s/apis/apps/v1"
+v1 "github.com/ericchiang/k8s/apis/core/v1"
 v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1"
 
 "github.com/influxdata/telegraf/internal/tls"
@@ -48,15 +47,15 @@ func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tl
 }, nil
 }
 
-func (c *client) getDaemonSets(ctx context.Context) (*v1beta2.DaemonSetList, error) {
-list := new(v1beta2.DaemonSetList)
+func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) {
+list := new(v1APPS.DaemonSetList)
 ctx, cancel := context.WithTimeout(ctx, c.timeout)
 defer cancel()
 return list, c.List(ctx, c.namespace, list)
 }
 
-func (c *client) getDeployments(ctx context.Context) (*v1beta1.DeploymentList, error) {
-list := &v1beta1.DeploymentList{}
+func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) {
+list := &v1APPS.DeploymentList{}
 ctx, cancel := context.WithTimeout(ctx, c.timeout)
 defer cancel()
 return list, c.List(ctx, c.namespace, list)
@@ -111,8 +110,8 @@ func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) {
 return list, c.List(ctx, c.namespace, list)
 }
 
-func (c *client) getStatefulSets(ctx context.Context) (*v1beta1.StatefulSetList, error) {
-list := new(v1beta1.StatefulSetList)
+func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) {
+list := new(v1APPS.StatefulSetList)
 ctx, cancel := context.WithTimeout(ctx, c.timeout)
 defer cancel()
 return list, c.List(ctx, c.namespace, list)

@@ -4,7 +4,7 @@ import (
 "context"
 "time"
 
-"github.com/ericchiang/k8s/apis/apps/v1beta2"
+"github.com/ericchiang/k8s/apis/apps/v1"
 
 "github.com/influxdata/telegraf"
 )
@@ -23,7 +23,7 @@ func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *Kubern
 }
 }
 
-func (ki *KubernetesInventory) gatherDaemonSet(d v1beta2.DaemonSet, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error {
 fields := map[string]interface{}{
 "generation": d.Metadata.GetGeneration(),
 "current_number_scheduled": d.Status.GetCurrentNumberScheduled(),

@@ -4,7 +4,7 @@ import (
 "testing"
 "time"
 
-"github.com/ericchiang/k8s/apis/apps/v1beta2"
+"github.com/ericchiang/k8s/apis/apps/v1"
 metav1 "github.com/ericchiang/k8s/apis/meta/v1"
 
 "github.com/influxdata/telegraf/testutil"
@@ -24,7 +24,7 @@ func TestDaemonSet(t *testing.T) {
 name: "no daemon set",
 handler: &mockHandler{
 responseMap: map[string]interface{}{
-"/daemonsets/": &v1beta2.DaemonSetList{},
+"/daemonsets/": &v1.DaemonSetList{},
 },
 },
 hasError: false,
@@ -33,10 +33,10 @@ func TestDaemonSet(t *testing.T) {
 name: "collect daemonsets",
 handler: &mockHandler{
 responseMap: map[string]interface{}{
-"/daemonsets/": &v1beta2.DaemonSetList{
-Items: []*v1beta2.DaemonSet{
+"/daemonsets/": &v1.DaemonSetList{
+Items: []*v1.DaemonSet{
 {
-Status: &v1beta2.DaemonSetStatus{
+Status: &v1.DaemonSetStatus{
 CurrentNumberScheduled: toInt32Ptr(3),
 DesiredNumberScheduled: toInt32Ptr(5),
 NumberAvailable: toInt32Ptr(2),
@@ -90,7 +90,7 @@ func TestDaemonSet(t *testing.T) {
 client: cli,
 }
 acc := new(testutil.Accumulator)
-for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1beta2.DaemonSetList)).Items {
+for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
 err := ks.gatherDaemonSet(*dset, acc)
 if err != nil {
 t.Errorf("Failed to gather daemonset - %s", err.Error())

@@ -4,8 +4,7 @@ import (
 "context"
 "time"
 
-"github.com/ericchiang/k8s/apis/apps/v1beta1"
-
+v1 "github.com/ericchiang/k8s/apis/apps/v1"
 "github.com/influxdata/telegraf"
 )
 
@@ -23,7 +22,7 @@ func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *Kuber
 }
 }
 
-func (ki *KubernetesInventory) gatherDeployment(d v1beta1.Deployment, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error {
 fields := map[string]interface{}{
 "replicas_available": d.Status.GetAvailableReplicas(),
 "replicas_unavailable": d.Status.GetUnavailableReplicas(),

@@ -4,7 +4,7 @@ import (
 "testing"
 "time"
 
-"github.com/ericchiang/k8s/apis/apps/v1beta1"
+"github.com/ericchiang/k8s/apis/apps/v1"
 metav1 "github.com/ericchiang/k8s/apis/meta/v1"
 "github.com/ericchiang/k8s/util/intstr"
 "github.com/influxdata/telegraf/testutil"
@@ -37,7 +37,7 @@ func TestDeployment(t *testing.T) {
 name: "no deployments",
 handler: &mockHandler{
 responseMap: map[string]interface{}{
-"/deployments/": &v1beta1.DeploymentList{},
+"/deployments/": &v1.DeploymentList{},
 },
 },
 hasError: false,
@@ -46,19 +46,19 @@ func TestDeployment(t *testing.T) {
 name: "collect deployments",
 handler: &mockHandler{
 responseMap: map[string]interface{}{
-"/deployments/": &v1beta1.DeploymentList{
-Items: []*v1beta1.Deployment{
+"/deployments/": &v1.DeploymentList{
+Items: []*v1.Deployment{
 {
-Status: &v1beta1.DeploymentStatus{
+Status: &v1.DeploymentStatus{
 Replicas: toInt32Ptr(3),
 AvailableReplicas: toInt32Ptr(1),
 UnavailableReplicas: toInt32Ptr(4),
 UpdatedReplicas: toInt32Ptr(2),
 ObservedGeneration: toInt64Ptr(9121),
 },
-Spec: &v1beta1.DeploymentSpec{
-Strategy: &v1beta1.DeploymentStrategy{
-RollingUpdate: &v1beta1.RollingUpdateDeployment{
+Spec: &v1.DeploymentSpec{
+Strategy: &v1.DeploymentStrategy{
+RollingUpdate: &v1.RollingUpdateDeployment{
 MaxUnavailable: &intstr.IntOrString{
 IntVal: toInt32Ptr(30),
 },
@@ -98,7 +98,7 @@ func TestDeployment(t *testing.T) {
 client: cli,
 }
 acc := new(testutil.Accumulator)
-for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1beta1.DeploymentList)).Items {
+for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items {
 err := ks.gatherDeployment(*deployment, acc)
 if err != nil {
 t.Errorf("Failed to gather deployment - %s", err.Error())

@@ -4,7 +4,7 @@ import (
 "testing"
 "time"
 
-"github.com/ericchiang/k8s/apis/core/v1"
+v1 "github.com/ericchiang/k8s/apis/core/v1"
 v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1"
 metav1 "github.com/ericchiang/k8s/apis/meta/v1"
 "github.com/influxdata/telegraf/testutil"

@@ -4,7 +4,7 @@ import (
 "context"
 "time"
 
-"github.com/ericchiang/k8s/apis/apps/v1beta1"
+"github.com/ericchiang/k8s/apis/apps/v1"
 
 "github.com/influxdata/telegraf"
 )
@@ -23,7 +23,7 @@ func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *Kube
 }
 }
 
-func (ki *KubernetesInventory) gatherStatefulSet(s v1beta1.StatefulSet, acc telegraf.Accumulator) error {
+func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error {
 status := s.Status
 fields := map[string]interface{}{
 "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(),

@@ -4,7 +4,7 @@ import (
 "testing"
 "time"
 
-"github.com/ericchiang/k8s/apis/apps/v1beta1"
+"github.com/ericchiang/k8s/apis/apps/v1"
 metav1 "github.com/ericchiang/k8s/apis/meta/v1"
 
 "github.com/influxdata/telegraf/testutil"
@@ -24,7 +24,7 @@ func TestStatefulSet(t *testing.T) {
 name: "no statefulsets",
 handler: &mockHandler{
 responseMap: map[string]interface{}{
-"/statefulsets/": &v1beta1.StatefulSetList{},
+"/statefulsets/": &v1.StatefulSetList{},
 },
 },
 hasError: false,
@@ -33,17 +33,17 @@ func TestStatefulSet(t *testing.T) {
 name: "collect statefulsets",
 handler: &mockHandler{
 responseMap: map[string]interface{}{
-"/statefulsets/": &v1beta1.StatefulSetList{
-Items: []*v1beta1.StatefulSet{
+"/statefulsets/": &v1.StatefulSetList{
+Items: []*v1.StatefulSet{
 {
-Status: &v1beta1.StatefulSetStatus{
+Status: &v1.StatefulSetStatus{
 Replicas: toInt32Ptr(2),
 CurrentReplicas: toInt32Ptr(4),
 ReadyReplicas: toInt32Ptr(1),
 UpdatedReplicas: toInt32Ptr(3),
 ObservedGeneration: toInt64Ptr(119),
 },
-Spec: &v1beta1.StatefulSetSpec{
+Spec: &v1.StatefulSetSpec{
 Replicas: toInt32Ptr(3),
 },
 Metadata: &metav1.ObjectMeta{
@@ -90,7 +90,7 @@ func TestStatefulSet(t *testing.T) {
 client: cli,
 }
 acc := new(testutil.Accumulator)
-for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1beta1.StatefulSetList)).Items {
+for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items {
 err := ks.gatherStatefulSet(*ss, acc)
 if err != nil {
 t.Errorf("Failed to gather ss - %s", err.Error())

@@ -3,11 +3,19 @@
 This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet.
 
 To find the ip address of the host you are running on you can issue a command like the following:
+
 ```
 $ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP'
 ```
+
 In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API.
 
+Kubernetes is a fast moving project, with a new minor release every 3 months. As
+such, we will aim to maintain support only for versions that are supported by
+the major cloud providers; this is roughly 4 release / 2 years.
+
+**This plugin supports Kubernetes 1.11 and later.**
+
 #### Series Cardinality Warning
 
 This plugin may produce a high number of series which, when not controlled
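
Since the README assumes Telegraf runs as a DaemonSet and receives `$POD_NAMESPACE` through the downward API, a minimal sketch of that wiring is shown below; the names and image tag are placeholders rather than part of this change, and `$HOSTNAME` needs no explicit entry because Kubernetes sets it to the pod name automatically:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: telegraf               # placeholder name
spec:
  selector:
    matchLabels:
      app: telegraf
  template:
    metadata:
      labels:
        app: telegraf
    spec:
      containers:
        - name: telegraf
          image: telegraf:latest   # placeholder image tag
          env:
            # Downward API: expose the pod's namespace as $POD_NAMESPACE
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
```
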
@@ -80,7 +88,7 @@ Architecture][k8s-telegraf] or view the Helm charts:
 - runtime_image_fs_capacity_bytes
 - runtime_image_fs_used_bytes
 
-+ kubernetes_pod_container
+* kubernetes_pod_container
 - tags:
 - container_name
 - namespace
@@ -112,7 +120,7 @@ Architecture][k8s-telegraf] or view the Helm charts:
 - capacity_bytes
 - used_bytes
 
-+ kubernetes_pod_network
+* kubernetes_pod_network
 - tags:
 - namespace
 - node_name
@@ -141,7 +149,7 @@ kubernetes_system_container
 [series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
 [influx-docs]: https://docs.influxdata.com/influxdb/latest/
 [k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/
-[Telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf
-[InfluxDB]: https://github.com/helm/charts/tree/master/stable/influxdb
-[Chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf
-[Kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor
+[telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf
+[influxdb]: https://github.com/helm/charts/tree/master/stable/influxdb
+[chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf
+[kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor