From e2dade377795cad959d844bcba90bc019ed280bf Mon Sep 17 00:00:00 2001
From: Sean Knox
Date: Fri, 24 Aug 2018 09:35:17 -0700
Subject: [PATCH] list by pod, add nodemetrics

---
 Gopkg.lock      |  30 ++++---------
 Gopkg.toml      |   6 ++-
 kube_advisor.go | 110 ++++++++++++++++++++++++++++--------------------
 3 files changed, 79 insertions(+), 67 deletions(-)

diff --git a/Gopkg.lock b/Gopkg.lock
index 7fe3adc..9f6e3b5 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1,12 +1,6 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
 
 
-[[projects]]
-  name = "github.com/fatih/color"
-  packages = ["."]
-  revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
-  version = "v1.7.0"
-
 [[projects]]
   name = "github.com/ghodss/yaml"
   packages = ["."]
@@ -67,18 +61,6 @@
   revision = "1624edc4454b8682399def8740d46db5e4362ba4"
   version = "1.1.5"
 
-[[projects]]
-  name = "github.com/mattn/go-colorable"
-  packages = ["."]
-  revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
-  version = "v0.0.9"
-
-[[projects]]
-  name = "github.com/mattn/go-isatty"
-  packages = ["."]
-  revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
-  version = "v0.0.3"
-
 [[projects]]
   name = "github.com/mattn/go-runewidth"
   packages = ["."]
@@ -172,8 +154,8 @@
 [[projects]]
   name = "k8s.io/apimachinery"
   packages = ["pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1beta1","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/clock","pkg/util/errors","pkg/util/framer","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/reflect"]
-  revision = "302974c03f7e50f16561ba237db776ab93594ef6"
-  version = "kubernetes-1.10.0"
+  revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
+  version = "kubernetes-1.11.0"
 
 [[projects]]
   name = "k8s.io/client-go"
@@ -181,9 +163,15 @@
   revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
   version = "v8.0.0"
 
+[[projects]]
+  name = "k8s.io/metrics"
+  packages = ["pkg/apis/metrics","pkg/apis/metrics/v1alpha1","pkg/apis/metrics/v1beta1","pkg/client/clientset_generated/clientset","pkg/client/clientset_generated/clientset/scheme","pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1","pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"]
+  revision = "7afb501849915187f5b29c9727e106cbc6299d1c"
+  version = "kubernetes-1.11.2"
+
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "a4691b4b3e97cf7c975063b053695a5902a76cbfe4d04afebb83861063f1d8ae"
+  inputs-digest = "313e4a4d7b8f98ab52b376065f0e361e700e044fcc5a05739b0b09d61d511227"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index c5aa093..b1b77ce 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -4,7 +4,11 @@
 
 [[constraint]]
   name = "k8s.io/apimachinery"
-  version = "kubernetes-1.10.0"
+  version = "kubernetes-1.11.2"
+
+[[constraint]]
+  name = "k8s.io/metrics"
+  version = "kubernetes-1.11.2"
 
 [[constraint]]
   name = "k8s.io/client-go"
diff --git a/kube_advisor.go b/kube_advisor.go
index aa86f2f..5057870 100644
--- a/kube_advisor.go
+++ b/kube_advisor.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"flag"
+	"fmt"
 	"log"
 	"os"
 	"path/filepath"
@@ -11,10 +12,22 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
+
+	"k8s.io/metrics/pkg/apis/metrics/v1beta1"
+	metrics "k8s.io/metrics/pkg/client/clientset_generated/clientset"
 )
 
-func checkContainer(c v1.Container, ns string) (StatusCheck, bool) {
-	sc := StatusCheck{ContainerName: c.Name, Namespace: ns, Missing: make(map[string]bool)}
+func checkContainer(c v1.Container, p v1.Pod, pm v1beta1.PodMetrics) (PodStatusCheck, bool) {
+	sc := PodStatusCheck{
+		ContainerName: c.Name,
+		PodName:       p.Name,
+		Missing:       make(map[string]bool),
+	}
+
+	for _, cm := range pm.Containers {
+		sc.PodCPU = cm.Usage.Cpu().String()
+		sc.PodMemory = cm.Usage.Memory().String()
+	}
 
 	if c.Resources.Limits.Cpu().IsZero() {
 		sc.Missing["CPU Resource Limits Missing"] = true
@@ -29,18 +42,26 @@ func checkContainer(c v1.Container, ns string) (StatusCheck, bool) {
 		sc.Missing["Memory Request Limits Missing"] = true
 	}
 	if len(sc.Missing) == 0 {
-		return StatusCheck{}, false
+		return PodStatusCheck{}, false
 	}
 	return sc, true
 }
 
-// StatusCheck represents a container and its resource and request limit status
-type StatusCheck struct {
+// PodStatusCheck represents a container and its resource and request limit status
+type PodStatusCheck struct {
+	PodName       string
 	ContainerName string
-	Namespace     string
+	PodCPU        string
+	PodMemory     string
 	Missing       map[string]bool
 }
 
+type NodeStatusCheck struct {
+	NodeName   string
+	NodeCPU    string
+	NodeMemory string
+}
+
 func main() {
 	kubePtr := flag.Bool("use-kubeconfig", false, "use kubeconfig on local system")
 	flag.Parse()
@@ -64,59 +85,55 @@ func main() {
 	if err != nil {
 		log.Fatal(err)
 	}
-
-	deploymentsAppsV1, err := clientset.AppsV1().Deployments("").List(metav1.ListOptions{})
+	metricClient, err := metrics.NewForConfig(config)
 	if err != nil {
-		log.Fatalln("failed to get deployments:", err)
-	}
-	daemonsetsAppsV1, err := clientset.AppsV1().DaemonSets("").List(metav1.ListOptions{})
-	if err != nil {
-		log.Fatalln("failed to get daemon sets:", err)
-	}
-	statefulsetsAppsV1, err := clientset.AppsV1().StatefulSets("").List(metav1.ListOptions{})
-	if err != nil {
-		log.Fatalln("failed to get stateful sets:", err)
+		log.Fatal(err)
 	}
 
-	statusChecksWrapper := make(map[string][]*StatusCheck)
+	statusChecksWrapper := make(map[string][]*PodStatusCheck)
 
-	// Gather container statusChecksWrapper from Deployments
-	for _, d := range deploymentsAppsV1.Items {
-		containers := d.Spec.Template.Spec.Containers
+	pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+	if err != nil {
+		log.Fatalln("failed to get pods:", err)
+	}
+	for _, p := range pods.Items {
+		containers := p.Spec.Containers
 		for _, c := range containers {
-			status, ok := checkContainer(c, d.Namespace)
-			if ok {
-				statusChecksWrapper[d.GetName()] = append(statusChecksWrapper[d.GetName()], &status)
+			podMetricsList, _ := metricClient.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{})
+			for _, pm := range podMetricsList.Items {
+				if p.Name == pm.Name {
+					status, ok := checkContainer(c, p, pm)
+					if ok {
+						statusChecksWrapper[p.Namespace] = append(statusChecksWrapper[p.Namespace], &status)
+					}
+				}
 			}
 		}
 	}
 
-	// Gather container statusChecksWrapper from StatefulSets
-	for _, ss := range statefulsetsAppsV1.Items {
-		containers := ss.Spec.Template.Spec.Containers
-		for _, c := range containers {
-			status, ok := checkContainer(c, ss.Namespace)
-			if ok {
-				statusChecksWrapper[ss.GetName()] = append(statusChecksWrapper[ss.GetName()], &status)
-			}
-		}
+	var nodeStatuses []*NodeStatusCheck
+	nodeMetricsList, _ := metricClient.MetricsV1beta1().NodeMetricses().List(metav1.ListOptions{})
+	for _, nm := range nodeMetricsList.Items {
+		ns := NodeStatusCheck{NodeName: nm.Name, NodeCPU: nm.Usage.Cpu().String(), NodeMemory: nm.Usage.Memory().String()}
+		nodeStatuses = append(nodeStatuses, &ns)
 	}
 
-	// Gather container statusChecksWrapper from DaemonSets
-	for _, ds := range daemonsetsAppsV1.Items {
-		containers := ds.Spec.Template.Spec.Containers
-		for _, c := range containers {
-			status, ok := checkContainer(c, ds.Namespace)
-			if ok {
-				statusChecksWrapper[ds.GetName()] = append(statusChecksWrapper[ds.GetName()], &status)
-			}
-		}
+	nodeTable := tablewriter.NewWriter(os.Stdout)
+	nodeTable.SetHeader([]string{"Node", "Node CPU Usage", "Node Memory Usage"})
+	nodeTable.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor},
+		tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor},
+		tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor})
+	nodeTable.SetAutoMergeCells(true)
+	nodeTable.SetRowLine(true)
+	for _, ns := range nodeStatuses {
+		nodeTable.Append([]string{ns.NodeName, ns.NodeCPU, ns.NodeMemory})
 	}
 
 	issuesTable := tablewriter.NewWriter(os.Stdout)
 	for k, statusChecks := range statusChecksWrapper {
-		issuesTable.SetHeader([]string{"Deployment/StatefulSet/DaemonSet", "Namespace", "Container", "Issue"})
+		issuesTable.SetHeader([]string{"Namespace", "Pod Name", "Pod CPU/Memory", "Container", "Issue"})
 		issuesTable.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor},
+			tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor},
 			tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor},
 			tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor},
 			tablewriter.Colors{tablewriter.Bold, tablewriter.BgBlackColor})
@@ -124,11 +141,11 @@ func main() {
 		issuesTable.SetRowLine(true)
 		for _, s := range statusChecks {
 			for key := range s.Missing {
-				issuesTable.Append([]string{k, s.Namespace, s.ContainerName, key})
+				resourceString := fmt.Sprintf("%v / %v", s.PodCPU, s.PodMemory)
+				issuesTable.Append([]string{k, s.PodName, resourceString, s.ContainerName, key})
 			}
 		}
 	}
-	issuesTable.Render()
 
 	remediationTable := tablewriter.NewWriter(os.Stdout)
 	remediationTable.SetHeader([]string{"Issue", "Remediation"})
@@ -139,5 +156,8 @@ func main() {
 	remediationTable.Append([]string{"Memory Request Limits Missing", "Consider setting resource and request limits to prevent resource starvation: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"})
 	remediationTable.Append([]string{"CPU Resource Limits Missing", "Consider setting resource and request limits to prevent resource starvation: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"})
 	remediationTable.Append([]string{"Memory Resource Limits Missing", "Consider setting resource and request limits to prevent resource starvation: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"})
+
+	issuesTable.Render()
+	nodeTable.Render()
 	remediationTable.Render()
 }