From d3aeda737ec56a3709815e7b71166ec7b30fd2e5 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Fri, 4 Feb 2022 16:25:12 -0800 Subject: [PATCH] feat: [NPM] Restructure code and add deploy manifests (#1203) --- Tiltfile | 11 + docs/npm.md | 2 +- npm/azure-npm.yaml | 6 +- npm/cmd/root.go | 8 +- npm/cmd/start.go | 8 +- npm/cmd/start_daemon.go | 9 +- npm/cmd/start_server.go | 3 +- npm/config/config.go | 14 +- npm/{ => controller}/server.go | 89 ++++---- npm/{ => daemon}/daemon.go | 7 +- npm/deploy/kustomize/README.md | 36 ++++ npm/deploy/kustomize/base/configmap.yaml | 25 +++ npm/deploy/kustomize/base/kustomization.yaml | 7 + npm/deploy/kustomize/base/rbac.yaml | 44 ++++ npm/deploy/kustomize/base/serviceaccount.yaml | 7 + .../overlays/controller/deployment.yaml | 77 +++++++ .../overlays/controller/kustomization.yaml | 7 + .../overlays/controller/service.yaml | 34 ++++ .../kustomize/overlays/daemon/deployment.yaml | 90 +++++++++ .../overlays/daemon/kustomization.yaml | 7 + .../kustomize/overlays/daemon/service.yaml | 16 ++ .../manifests/common/npm-configmap.yaml | 24 +++ .../manifests/common/npm-serviceaccount.yaml | 7 + npm/deploy/manifests/common/rbac.yaml | 44 ++++ .../manifests/controller/azure-npm.yaml | 190 ++++++++++++++++++ npm/deploy/manifests/daemon/azure-npm.yaml | 186 +++++++++++++++++ npm/deploy/npm/azure-npm.yaml | 164 +++++++++++++++ npm/examples/windows/azure-npm.yaml | 5 + npm/npm.go | 130 ++++++------ npm/{ => pkg/models}/consts.go | 9 +- npm/pkg/models/types.go | 53 +++++ npm/pkg/transport/events_client.go | 2 + npm/pkg/transport/events_server.go | 4 +- npm/types.go | 78 ------- 34 files changed, 1201 insertions(+), 202 deletions(-) create mode 100644 Tiltfile rename npm/{ => controller}/server.go (51%) rename npm/{ => daemon}/daemon.go (88%) create mode 100644 npm/deploy/kustomize/README.md create mode 100644 npm/deploy/kustomize/base/configmap.yaml create mode 100644 npm/deploy/kustomize/base/kustomization.yaml create mode 100644 npm/deploy/kustomize/base/rbac.yaml create mode 100644 npm/deploy/kustomize/base/serviceaccount.yaml create mode 100644 npm/deploy/kustomize/overlays/controller/deployment.yaml create mode 100644 npm/deploy/kustomize/overlays/controller/kustomization.yaml create mode 100644 npm/deploy/kustomize/overlays/controller/service.yaml create mode 100644 npm/deploy/kustomize/overlays/daemon/deployment.yaml create mode 100644 npm/deploy/kustomize/overlays/daemon/kustomization.yaml create mode 100644 npm/deploy/kustomize/overlays/daemon/service.yaml create mode 100644 npm/deploy/manifests/common/npm-configmap.yaml create mode 100644 npm/deploy/manifests/common/npm-serviceaccount.yaml create mode 100644 npm/deploy/manifests/common/rbac.yaml create mode 100644 npm/deploy/manifests/controller/azure-npm.yaml create mode 100644 npm/deploy/manifests/daemon/azure-npm.yaml create mode 100644 npm/deploy/npm/azure-npm.yaml rename npm/{ => pkg/models}/consts.go (76%) create mode 100644 npm/pkg/models/types.go delete mode 100644 npm/types.go diff --git a/Tiltfile b/Tiltfile new file mode 100644 index 000000000..cf1b36f29 --- /dev/null +++ b/Tiltfile @@ -0,0 +1,11 @@ +allow_k8s_contexts(k8s_context()) +default_registry('ttl.sh/nitishm-12390') +docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = { + "VERSION": "v1.4.14-101-gf900e319-dirty", + "NPM_AI_PATH": "github.com/Azure/azure-container-networking/npm.aiMetadata", + "NPM_AI_ID": "014c22bd-4107-459e-8475-67909e96edcb" +}) +# watch_file('npm') 
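+# Load both decomposed NPM manifests; allow_duplicates is needed because the
+# controller and daemon manifests each carry their own copy of the shared
+# ServiceAccount, RBAC, and ConfigMap resources.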
+k8s_yaml('npm/deploy/manifests/controller/azure-npm.yaml') +k8s_yaml('npm/deploy/manifests/daemon/azure-npm.yaml', allow_duplicates=True) + diff --git a/docs/npm.md b/docs/npm.md index 724b3345a..0326cb42f 100644 --- a/docs/npm.md +++ b/docs/npm.md @@ -12,7 +12,7 @@ Azure-NPM serves as a distributed firewall for the Kubernetes cluster, and it ca Running the command below will bring up one azure-npm instance on each Kubernetes node. ``` -kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml +kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/deploy/npm/azure-npm.yaml ``` Now you can secure your Kubernetes cluster with Azure-NPM by applying Kubernetes network policies. diff --git a/npm/azure-npm.yaml b/npm/azure-npm.yaml index dfdcac0c8..8d3908ca1 100644 --- a/npm/azure-npm.yaml +++ b/npm/azure-npm.yaml @@ -97,10 +97,10 @@ spec: - name: NPM_CONFIG value: /etc/azure-npm/azure-npm.json volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - name: log mountPath: /var/log + - name: xtables-lock + mountPath: /run/xtables.lock - name: protocols mountPath: /etc/protocols - name: azure-npm-config @@ -156,4 +156,4 @@ data: "EnableV2NPM": false, "PlaceAzureChainFirst": false } - } \ No newline at end of file + } diff --git a/npm/cmd/root.go b/npm/cmd/root.go index 88a0faab5..0c9bc38b7 100644 --- a/npm/cmd/root.go +++ b/npm/cmd/root.go @@ -14,7 +14,13 @@ func NewRootCmd() *cobra.Command { }, } - rootCmd.AddCommand(newStartNPMCmd()) + startCmd := newStartNPMCmd() + + startCmd.AddCommand(newStartNPMControlplaneCmd()) + startCmd.AddCommand(newStartNPMDaemonCmd()) + + rootCmd.AddCommand(startCmd) + rootCmd.AddCommand(newDebugCmd()) return rootCmd diff --git a/npm/cmd/start.go b/npm/cmd/start.go index c9d50a459..5adbde705 100644 --- a/npm/cmd/start.go +++ b/npm/cmd/start.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-container-networking/npm/pkg/dataplane" "github.com/Azure/azure-container-networking/npm/pkg/dataplane/ipsets" "github.com/Azure/azure-container-networking/npm/pkg/dataplane/policies" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/Azure/azure-container-networking/npm/util" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -85,11 +86,6 @@ func newStartNPMCmd() *cobra.Command { startNPMCmd.Flags().String(flagKubeConfigPath, flagDefaults[flagKubeConfigPath], "path to kubeconfig") - // The controlplane subcommand starts the NPM controller's controlplane component in the decomposed mode - startNPMCmd.AddCommand(newStartNPMControlplaneCmd()) - // The daemon subcommand starts the NPM controller's datapath component in the daemon mode - startNPMCmd.AddCommand(newStartNPMDaemonCmd()) - return startNPMCmd } @@ -149,7 +145,7 @@ func start(config npmconfig.Config, flags npmconfig.Flags) error { } else { npmV2DataplaneCfg.IPSetMode = ipsets.ApplyAllIPSets } - dp, err = dataplane.NewDataPlane(npm.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, stopChannel) + dp, err = dataplane.NewDataPlane(models.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, stopChannel) if err != nil { return fmt.Errorf("failed to create dataplane with error %w", err) } diff --git a/npm/cmd/start_daemon.go b/npm/cmd/start_daemon.go index 08ce1b423..c6928c574 100644 --- a/npm/cmd/start_daemon.go +++ b/npm/cmd/start_daemon.go @@ -7,10 +7,11 @@ import ( "strconv" "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/npm" npmconfig 
"github.com/Azure/azure-container-networking/npm/config" + "github.com/Azure/azure-container-networking/npm/daemon" "github.com/Azure/azure-container-networking/npm/pkg/controlplane/goalstateprocessor" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/Azure/azure-container-networking/npm/pkg/transport" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -48,7 +49,7 @@ func startDaemon(config npmconfig.Config) error { pod := os.Getenv(podNameEnv) node := os.Getenv(nodeNameEnv) - addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.Port) + addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.ServicePort) ctx := context.Background() err := initLogging() if err != nil { @@ -58,7 +59,7 @@ func startDaemon(config npmconfig.Config) error { var dp dataplane.GenericDataplane - dp, err = dataplane.NewDataPlane(npm.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, wait.NeverStop) + dp, err = dataplane.NewDataPlane(models.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, wait.NeverStop) if err != nil { klog.Errorf("failed to create dataplane: %v", err) return fmt.Errorf("failed to create dataplane with error %w", err) @@ -76,7 +77,7 @@ func startDaemon(config npmconfig.Config) error { return fmt.Errorf("failed to create goalstate processor: %w", err) } - n, err := npm.NewNetworkPolicyDaemon(ctx, config, dp, gsp, client, version) + n, err := daemon.NewNetworkPolicyDaemon(ctx, config, dp, gsp, client, version) if err != nil { klog.Errorf("failed to create dataplane : %v", err) return fmt.Errorf("failed to create dataplane: %w", err) diff --git a/npm/cmd/start_server.go b/npm/cmd/start_server.go index 3d3f9742f..ffc5499b7 100644 --- a/npm/cmd/start_server.go +++ b/npm/cmd/start_server.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-container-networking/npm" npmconfig "github.com/Azure/azure-container-networking/npm/config" + "github.com/Azure/azure-container-networking/npm/controller" restserver "github.com/Azure/azure-container-networking/npm/http/server" "github.com/Azure/azure-container-networking/npm/metrics" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" @@ -105,7 +106,7 @@ func startControlplane(config npmconfig.Config, flags npmconfig.Flags) error { return fmt.Errorf("failed to create dataplane with error: %w", err) } - npMgr, err := npm.NewNetworkPolicyServer(config, factory, mgr, dp, version, k8sServerVersion) + npMgr, err := controller.NewNetworkPolicyServer(config, factory, mgr, dp, version, k8sServerVersion) if err != nil { klog.Errorf("failed to create NPM controlplane manager with error: %v", err) return fmt.Errorf("failed to create NPM controlplane manager: %w", err) diff --git a/npm/config/config.go b/npm/config/config.go index 4ceb1cdf3..3bb07114e 100644 --- a/npm/config/config.go +++ b/npm/config/config.go @@ -1,9 +1,10 @@ package npmconfig const ( - defaultResyncPeriod = 15 - defaultListeningPort = 10091 - defaultGrpcPort = 10092 + defaultResyncPeriod = 15 + defaultListeningPort = 10091 + defaultGrpcPort = 10092 + defaultGrpcServicePort = 9002 // ConfigEnvPath is what's used by viper to load config path ConfigEnvPath = "NPM_CONFIG" ) @@ -16,8 +17,9 @@ var DefaultConfig = Config{ ListeningAddress: "0.0.0.0", Transport: GrpcServerConfig{ - Address: "0.0.0.0", - Port: defaultGrpcPort, + Address: "0.0.0.0", + Port: defaultGrpcPort, + ServicePort: defaultGrpcServicePort, }, Toggles: Toggles{ @@ -35,6 +37,8 @@ type GrpcServerConfig struct 
{ Address string `json:"Address,omitempty"` // Port is the port on which the gRPC server will listen Port int `json:"Port,omitempty"` + // ServicePort is the service port for the client to connect to the gRPC server + ServicePort int `json:"ServicePort,omitempty"` } type Config struct { diff --git a/npm/server.go b/npm/controller/server.go similarity index 51% rename from npm/server.go rename to npm/controller/server.go index edd2a9b6f..672b4f305 100644 --- a/npm/server.go +++ b/npm/controller/server.go @@ -1,6 +1,6 @@ // Copyright 2018 Microsoft. All rights reserved. // MIT License -package npm +package controller import ( "encoding/json" @@ -9,6 +9,7 @@ import ( npmconfig "github.com/Azure/azure-container-networking/npm/config" controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/Azure/azure-container-networking/npm/pkg/transport" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/version" @@ -17,6 +18,8 @@ import ( "k8s.io/klog" ) +var aiMetadata string //nolint // aiMetadata is set in Makefile + type NetworkPolicyServer struct { config npmconfig.Config @@ -25,20 +28,20 @@ type NetworkPolicyServer struct { // Informers are the Kubernetes Informer // https://pkg.go.dev/k8s.io/client-go/informers - Informers + models.Informers // Controllers for handling Kubernetes resource watcher events - K8SControllersV2 + models.K8SControllersV2 // Azure-specific variables - AzureConfig + models.AzureConfig } var ( - ErrInformerFactoryNil = errors.New("informer factory is nil") - ErrTransportManagerNil = errors.New("transport manager is nil") - ErrK8SServerVersionNil = errors.New("k8s server version is nil") - ErrInformerSyncFailure = errors.New("informer sync failure") + ErrInformerFactoryNil = errors.New("informer factory is nil") + ErrTransportManagerNil = errors.New("transport manager is nil") + ErrK8SServerVersionNil = errors.New("k8s server version is nil") + ErrDataplaneNotInitialized = errors.New("dataplane is not initialized") ) func NewNetworkPolicyServer( @@ -70,87 +73,87 @@ func NewNetworkPolicyServer( n := &NetworkPolicyServer{ config: config, tm: mgr, - Informers: Informers{ - informerFactory: informerFactory, - podInformer: informerFactory.Core().V1().Pods(), - nsInformer: informerFactory.Core().V1().Namespaces(), - npInformer: informerFactory.Networking().V1().NetworkPolicies(), + Informers: models.Informers{ + InformerFactory: informerFactory, + PodInformer: informerFactory.Core().V1().Pods(), + NsInformer: informerFactory.Core().V1().Namespaces(), + NpInformer: informerFactory.Networking().V1().NetworkPolicies(), }, - AzureConfig: AzureConfig{ - k8sServerVersion: k8sServerVersion, - NodeName: GetNodeName(), - version: npmVersion, + AzureConfig: models.AzureConfig{ + K8sServerVersion: k8sServerVersion, + NodeName: models.GetNodeName(), + Version: npmVersion, TelemetryEnabled: true, }, } - n.npmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} - n.podControllerV2 = controllersv2.NewPodController(n.podInformer, dp, n.npmNamespaceCacheV2) - n.namespaceControllerV2 = controllersv2.NewNamespaceController(n.nsInformer, dp, n.npmNamespaceCacheV2) - n.netPolControllerV2 = controllersv2.NewNetworkPolicyController(n.npInformer, dp) + n.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} + n.PodControllerV2 = 
controllersv2.NewPodController(n.PodInformer, dp, n.NpmNamespaceCacheV2) + n.NamespaceControllerV2 = controllersv2.NewNamespaceController(n.NsInformer, dp, n.NpmNamespaceCacheV2) + n.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(n.NpInformer, dp) return n, nil } func (n *NetworkPolicyServer) MarshalJSON() ([]byte, error) { - m := map[CacheKey]json.RawMessage{} + m := map[models.CacheKey]json.RawMessage{} var npmNamespaceCacheRaw []byte var err error - npmNamespaceCacheRaw, err = json.Marshal(n.npmNamespaceCacheV2) + npmNamespaceCacheRaw, err = json.Marshal(n.NpmNamespaceCacheV2) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NsMap] = npmNamespaceCacheRaw + m[models.NsMap] = npmNamespaceCacheRaw var podControllerRaw []byte - podControllerRaw, err = json.Marshal(n.podControllerV2) + podControllerRaw, err = json.Marshal(n.PodControllerV2) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[PodMap] = podControllerRaw + m[models.PodMap] = podControllerRaw nodeNameRaw, err := json.Marshal(n.NodeName) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NodeName] = nodeNameRaw + m[models.NodeName] = nodeNameRaw npmCacheRaw, err := json.Marshal(m) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } return npmCacheRaw, nil } func (n *NetworkPolicyServer) GetAppVersion() string { - return n.version + return n.Version } func (n *NetworkPolicyServer) Start(config npmconfig.Config, stopCh <-chan struct{}) error { - // Starts all informers manufactured by n's informerFactory. - n.informerFactory.Start(stopCh) + // Starts all informers manufactured by n's InformerFactory. + n.InformerFactory.Start(stopCh) // Wait for the initial sync of local cache. - if !cache.WaitForCacheSync(stopCh, n.podInformer.Informer().HasSynced) { - return fmt.Errorf("Pod informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, n.PodInformer.Informer().HasSynced) { + return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, n.nsInformer.Informer().HasSynced) { - return fmt.Errorf("Namespace informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, n.NsInformer.Informer().HasSynced) { + return fmt.Errorf("Namespace informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, n.npInformer.Informer().HasSynced) { - return fmt.Errorf("NetworkPolicy informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, n.NpInformer.Informer().HasSynced) { + return fmt.Errorf("NetworkPolicy informer error: %w", models.ErrInformerSyncFailure) } // start v2 NPM controllers after synced - go n.podControllerV2.Run(stopCh) - go n.namespaceControllerV2.Run(stopCh) - go n.netPolControllerV2.Run(stopCh) + go n.PodControllerV2.Run(stopCh) + go n.NamespaceControllerV2.Run(stopCh) + go n.NetPolControllerV2.Run(stopCh) // start the transport layer (gRPC) server // We block the main thread here until the server is stopped. 
diff --git a/npm/daemon.go b/npm/daemon/daemon.go
similarity index 88%
rename from npm/daemon.go
rename to npm/daemon/daemon.go
index a06c7971e..b261dbc30 100644
--- a/npm/daemon.go
+++ b/npm/daemon/daemon.go
@@ -1,9 +1,10 @@
 // Copyright 2018 Microsoft. All rights reserved.
 // MIT License
-package npm
+package daemon
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	npmconfig "github.com/Azure/azure-container-networking/npm/config"
@@ -12,6 +13,10 @@ import (
 	"github.com/Azure/azure-container-networking/npm/pkg/transport"
 )
 
+var aiMetadata string //nolint // aiMetadata is set in Makefile
+
+var ErrDataplaneNotInitialized = errors.New("dataplane is not initialized")
+
 type NetworkPolicyDaemon struct {
 	ctx    context.Context
 	config npmconfig.Config
diff --git a/npm/deploy/kustomize/README.md b/npm/deploy/kustomize/README.md
new file mode 100644
index 000000000..f83ca04c3
--- /dev/null
+++ b/npm/deploy/kustomize/README.md
@@ -0,0 +1,36 @@
+# Kustomize based deployment
+
+## Prerequisites
+
+- [Kustomize](https://kustomize.io/) - Follow the instructions below to install it.
+
+  ```terminal
+  curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
+  ```
+
+  For other installation options refer to https://kubectl.docs.kubernetes.io/installation/kustomize.
+
+## Generating the manifests
+
+To generate the resources for the **controller**, run the following command:
+
+```terminal
+kustomize build overlays/controller > /tmp/controller.yaml
+```
+
+To generate the resources for the **daemon**, run the following command:
+
+```terminal
+kustomize build overlays/daemon > /tmp/daemon.yaml
+```
+
+## Deploying to the cluster
+
+> `kustomize` is not required for the deploy steps below, since it is already bundled in the `kubectl` binary.
+
+### NPM Controller
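+
+To deploy the controller to your cluster, run the following command (the controller overlay is applied the same way as the daemon overlay below):
+
+```terminal
+kubectl apply -k overlays/controller
+```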
+
+### NPM Daemon
+
+To deploy the daemon to your cluster, run the following command:
+
+```terminal
+kubectl apply -k overlays/daemon
+```
diff --git a/npm/deploy/kustomize/base/configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml
new file mode 100644
index 000000000..d9f549f2a
--- /dev/null
+++ b/npm/deploy/kustomize/base/configmap.yaml
@@ -0,0 +1,25 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: azure-npm-config
+  namespace: kube-system
+data:
+  azure-npm.json: |
+    {
+      "ResyncPeriodInMinutes": 15,
+      "ListeningPort": 10091,
+      "ListeningAddress": "0.0.0.0",
+      "Toggles": {
+        "EnablePrometheusMetrics": true,
+        "EnablePprof": true,
+        "EnableHTTPDebugAPI": true,
+        "EnableV2NPM": false,
+        "PlaceAzureChainFirst": false
+      },
+      "Transport": {
+        "Address": "azure-npm.kube-system.svc.cluster.local",
+        "Port": 10092,
+        "ServicePort": 9001
+      }
+    }
diff --git a/npm/deploy/kustomize/base/kustomization.yaml b/npm/deploy/kustomize/base/kustomization.yaml
new file mode 100644
index 000000000..cf4b75aa8
--- /dev/null
+++ b/npm/deploy/kustomize/base/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - configmap.yaml
+  - serviceaccount.yaml
+  - rbac.yaml
diff --git a/npm/deploy/kustomize/base/rbac.yaml b/npm/deploy/kustomize/base/rbac.yaml
new file mode 100644
index 000000000..c1a2565e3
--- /dev/null
+++ b/npm/deploy/kustomize/base/rbac.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: azure-npm
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - nodes
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: azure-npm-binding
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+subjects:
+  - kind: ServiceAccount
+    name: azure-npm
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: azure-npm
+  apiGroup: rbac.authorization.k8s.io
+---
diff --git a/npm/deploy/kustomize/base/serviceaccount.yaml b/npm/deploy/kustomize/base/serviceaccount.yaml
new file mode 100644
index 000000000..97a508c1b
--- /dev/null
+++ b/npm/deploy/kustomize/base/serviceaccount.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: azure-npm
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
diff --git a/npm/deploy/kustomize/overlays/controller/deployment.yaml b/npm/deploy/kustomize/overlays/controller/deployment.yaml
new file mode 100644
index 000000000..f6294e3a5
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/controller/deployment.yaml
@@ -0,0 +1,77 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: azure-npm-controller
+  namespace: kube-system
+  labels:
+    app: azure-npm
+    component: controller
+    addonmanager.kubernetes.io/mode: EnsureExists
+spec:
+  selector:
+    matchLabels:
+      k8s-app: azure-npm
+      component: controller
+  template:
+    metadata:
+      labels:
+        k8s-app: azure-npm
+        component: controller
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        azure.npm/scrapeable: ''
+    spec:
+      priorityClassName: system-node-critical
+      tolerations:
+        - operator: "Exists"
+          effect: NoExecute
+        - operator: "Exists"
+          effect: NoSchedule
+        - key: CriticalAddonsOnly
+          operator: Exists
+      containers:
+        - name: azure-npm
+          ports:
+            - name: metrics
+              containerPort: 10091
+            - name: http
+              containerPort: 10092
+          image: azure-npm:v1.4.1
+          command: ["azure-npm"]
+          args: ["start", "controlplane"]
+          resources:
+            limits:
+              cpu: 250m
+              memory: 300Mi
+            requests:
+              cpu: 250m
+          securityContext:
+            privileged: true
+          env:
+            - name: HOSTNAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: spec.nodeName
+            - name: NPM_CONFIG
+              value: /etc/azure-npm/azure-npm.json
+          volumeMounts:
+            - name: log
+              mountPath: /var/log
+            - name: protocols
+              mountPath: /etc/protocols
+            - name: azure-npm-config
+              mountPath: /etc/azure-npm
+      volumes:
+        - name: log
+          hostPath:
+            path: /var/log
+            type: Directory
+        - name: protocols
+          hostPath:
+            path: /etc/protocols
+            type: File
+        - name: azure-npm-config
+          configMap:
+            name: azure-npm-config
+      serviceAccountName: azure-npm
diff --git a/npm/deploy/kustomize/overlays/controller/kustomization.yaml b/npm/deploy/kustomize/overlays/controller/kustomization.yaml
new file mode 100644
index 000000000..03c002324
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/controller/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+bases:
+- ../../base
+resources:
+  - deployment.yaml
+  - service.yaml
diff --git a/npm/deploy/kustomize/overlays/controller/service.yaml b/npm/deploy/kustomize/overlays/controller/service.yaml
new file mode 100644
index 000000000..3db16d9ac
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/controller/service.yaml
@@ -0,0 +1,34 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: npm-controller-metrics-cluster-service
+  namespace: kube-system
+  labels:
+    app: azure-npm
+    component: controller
+spec:
+  selector:
+    k8s-app: azure-npm
+    component: controller
+  ports:
+    - port: 9000
+      name: metrics
+      targetPort: 10091
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: azure-npm
+  namespace: kube-system
+  labels:
+    app: azure-npm
+    component: controller
+spec:
+  selector:
+    k8s-app: azure-npm
+    component: controller
+  ports:
+    - name: http
+      port: 9001
+      targetPort: 10092
diff --git a/npm/deploy/kustomize/overlays/daemon/deployment.yaml b/npm/deploy/kustomize/overlays/daemon/deployment.yaml
new file mode 100644
index 000000000..93728fc46
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/daemon/deployment.yaml
@@ -0,0 +1,90 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: azure-npm-daemon
+  namespace: kube-system
+  labels:
+    app: azure-npm
+    component: daemon
+    addonmanager.kubernetes.io/mode: EnsureExists
+spec:
+  selector:
+    matchLabels:
+      k8s-app: azure-npm
+      component: daemon
+  template:
+    metadata:
+      labels:
+        k8s-app: azure-npm
+        component: daemon
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        azure.npm/scrapeable: ''
+    spec:
+      priorityClassName: system-node-critical
+      tolerations:
+        - operator: "Exists"
+          effect: NoExecute
+        - operator: "Exists"
+          effect: NoSchedule
+        - key: CriticalAddonsOnly
+          operator: Exists
+      containers:
+        - name: azure-npm
+          ports:
+            - name: metrics
+              containerPort: 10091
+          image: azure-npm:v1.4.1
+          command: ["azure-npm"]
+          args: ["start", "daemon"]
+          resources:
+            limits:
+              cpu: 250m
+              memory: 300Mi
+            requests:
+              cpu: 250m
+          securityContext:
+            privileged: true
+          env:
+            - name: HOSTNAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: spec.nodeName
+            - name: NPM_CONFIG
+              value: /etc/azure-npm/azure-npm.json
+            - name: DAEMON_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: DAEMON_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: log
+              mountPath: /var/log
+            - name: xtables-lock
+              mountPath: /run/xtables.lock
+            - name: protocols
+              mountPath: /etc/protocols
+            - name: azure-npm-config
+              mountPath: /etc/azure-npm
+      hostNetwork: true
+      volumes:
+        - name: log
+          hostPath:
+            path: /var/log
+            type: Directory
+        - name: xtables-lock
+          hostPath:
+            path: /run/xtables.lock
+            type: File
+        - name: protocols
+          hostPath:
+            path: /etc/protocols
+            type: File
+        - name: azure-npm-config
+          configMap:
+            name: azure-npm-config
+      serviceAccountName: azure-npm
diff --git a/npm/deploy/kustomize/overlays/daemon/kustomization.yaml b/npm/deploy/kustomize/overlays/daemon/kustomization.yaml
new file mode 100644
index 000000000..03c002324
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/daemon/kustomization.yaml
@@ -0,0 +1,7 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+bases:
+- ../../base
+resources:
+  - deployment.yaml
+  - service.yaml
diff --git a/npm/deploy/kustomize/overlays/daemon/service.yaml b/npm/deploy/kustomize/overlays/daemon/service.yaml
new file mode 100644
index 000000000..2cdbe35e2
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/daemon/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: npm-daemon-metrics-cluster-service
+  namespace: kube-system
+  labels:
+    app: azure-npm
+    component: daemon
+spec:
+  selector:
+    k8s-app: azure-npm
+    component: daemon
+  ports:
+    - name: metrics
+      port: 9000
+      targetPort: 10091
diff --git a/npm/deploy/manifests/common/npm-configmap.yaml b/npm/deploy/manifests/common/npm-configmap.yaml
new file mode 100644
index 000000000..4d8bd0d38
--- /dev/null
+++ b/npm/deploy/manifests/common/npm-configmap.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: azure-npm-config
+  namespace: kube-system
+data:
+  azure-npm.json: |
+    {
+      "ResyncPeriodInMinutes": 15,
+      "ListeningPort": 10091,
+      "ListeningAddress": "0.0.0.0",
+      "Toggles": {
+        "EnablePrometheusMetrics": true,
+        "EnablePprof": true,
+        "EnableHTTPDebugAPI": true,
+        "EnableV2NPM": false,
+        "PlaceAzureChainFirst": false
+      },
+      "Transport": {
+        "Address": "azure-npm.kube-system.svc.cluster.local",
+        "Port": 10092
+      }
+    }
diff --git a/npm/deploy/manifests/common/npm-serviceaccount.yaml b/npm/deploy/manifests/common/npm-serviceaccount.yaml
new file mode 100644
index 000000000..97a508c1b
--- /dev/null
+++ b/npm/deploy/manifests/common/npm-serviceaccount.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: azure-npm
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
diff --git a/npm/deploy/manifests/common/rbac.yaml b/npm/deploy/manifests/common/rbac.yaml
new file mode 100644
index 000000000..c1a2565e3
--- /dev/null
+++ b/npm/deploy/manifests/common/rbac.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: azure-npm
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - nodes
+      - namespaces
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - networking.k8s.io
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: azure-npm-binding
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+subjects:
+  - kind: ServiceAccount
+    name: azure-npm
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: azure-npm
+  apiGroup: rbac.authorization.k8s.io
+---
diff --git a/npm/deploy/manifests/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml
new file mode 100644
index 000000000..5804e79db
--- /dev/null
+++ b/npm/deploy/manifests/controller/azure-npm.yaml
@@ -0,0 +1,190 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: azure-npm
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: azure-npm
+  namespace: kube-system
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: azure-npm-binding
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: azure-npm
+subjects:
+- kind: ServiceAccount
+  name: azure-npm
+  namespace: kube-system
+---
+apiVersion: v1
+data:
+  azure-npm.json: |
+    {
+      "ResyncPeriodInMinutes": 15,
+      "ListeningPort": 10091,
+      "ListeningAddress": "0.0.0.0",
+      "Toggles": {
+        "EnablePrometheusMetrics": true,
+        "EnablePprof": true,
+        "EnableHTTPDebugAPI": true,
+        "EnableV2NPM": false,
+        "PlaceAzureChainFirst": false
+      },
+      "Transport": {
+        "Address": "azure-npm.kube-system.svc.cluster.local",
+        "Port": 10092,
+        "ServicePort": 9001
+      }
+    }
+kind: ConfigMap
+metadata:
+  name: azure-npm-config
+  namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: azure-npm
+    component: controller
+  name: azure-npm
+  namespace: kube-system
+spec:
+  ports:
+  - name: http
+    port: 9001
+    targetPort: 10092
+  selector:
+    component: controller
+    k8s-app: azure-npm
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: azure-npm
+    component: controller
+  name: npm-controller-metrics-cluster-service
+  namespace: kube-system
+spec:
+  ports:
+  - name: metrics
+    port: 9000
+    targetPort: 10091
+  selector:
+    component: controller
+    k8s-app: azure-npm
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+    app: azure-npm
+    component: controller
+  name: azure-npm-controller
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      component: controller
+      k8s-app: azure-npm
+  template:
+    metadata:
+      annotations:
+        azure.npm/scrapeable: ""
+        scheduler.alpha.kubernetes.io/critical-pod: ""
+      labels:
+        component: controller
+        k8s-app: azure-npm
+    spec:
+      containers:
+      - args:
+        - start
+        - controlplane
+        ports:
+        - name: metrics
+          containerPort: 10091
+        - name: http
+          containerPort: 10092
+        command:
+        - azure-npm
+        env:
+        - name: HOSTNAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: NPM_CONFIG
+          value: /etc/azure-npm/azure-npm.json
+        image: azure-npm:v1.4.1
+        name: azure-npm
+        resources:
+          limits:
+            cpu: 250m
+            memory: 300Mi
+          requests:
+            cpu: 250m
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /var/log
+          name: log
+        - mountPath: /etc/protocols
+          name: protocols
+        - mountPath: /etc/azure-npm
+          name: azure-npm-config
+      priorityClassName: system-node-critical
+      serviceAccountName: azure-npm
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
+      - key: CriticalAddonsOnly
+        operator: Exists
+      volumes:
+      - hostPath:
+          path: /var/log
+          type: Directory
+        name: log
+      - hostPath:
+          path: /etc/protocols
+          type: File
+        name: protocols
+      - configMap:
+          name: azure-npm-config
+        name: azure-npm-config
diff --git a/npm/deploy/manifests/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml
new file mode 100644
index 000000000..ca4508cb3
--- /dev/null
+++ b/npm/deploy/manifests/daemon/azure-npm.yaml
@@ -0,0 +1,186 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: azure-npm
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: azure-npm
+  namespace: kube-system
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+  name: azure-npm-binding
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: azure-npm
+subjects:
+- kind: ServiceAccount
+  name: azure-npm
+  namespace: kube-system
+---
+apiVersion: v1
+data:
+  azure-npm.json: |
+    {
+      "ResyncPeriodInMinutes": 15,
+      "ListeningPort": 10091,
+      "ListeningAddress": "0.0.0.0",
+      "Toggles": {
+        "EnablePrometheusMetrics": true,
+        "EnablePprof": true,
+        "EnableHTTPDebugAPI": true,
+        "EnableV2NPM": false,
+        "PlaceAzureChainFirst": false
+      },
+      "Transport": {
+        "Address": "azure-npm.kube-system.svc.cluster.local",
+        "Port": 10092,
+        "ServicePort": 9001
+      }
+    }
+kind: ConfigMap
+metadata:
+  name: azure-npm-config
+  namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: azure-npm
+    component: daemon
+  name: npm-daemon-metrics-cluster-service
+  namespace: kube-system
+spec:
+  ports:
+  - name: metrics
+    port: 9000
+    targetPort: 10091
+  selector:
+    component: daemon
+    k8s-app: azure-npm
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
+    app: azure-npm
+    component: daemon
+  name: azure-npm-daemon
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      component: daemon
+      k8s-app: azure-npm
+  template:
+    metadata:
+      annotations:
+        azure.npm/scrapeable: ""
+        scheduler.alpha.kubernetes.io/critical-pod: ""
+      labels:
+        component: daemon
+        k8s-app: azure-npm
+    spec:
+      containers:
+      - args:
+        - start
+        - daemon
+        command:
+        - azure-npm
+        env:
+        - name: HOSTNAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: NPM_CONFIG
+          value: /etc/azure-npm/azure-npm.json
+        - name: DAEMON_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: DAEMON_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        image: azure-npm:v1.4.1
+        name: azure-npm
+        ports:
+        - name: metrics
+          containerPort: 10091
+        resources:
+          limits:
+            cpu: 250m
+            memory: 300Mi
+          requests:
+            cpu: 250m
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /var/log
+          name: log
+        - mountPath: /run/xtables.lock
+          name: xtables-lock
+        - mountPath: /etc/protocols
+          name: protocols
+        - mountPath: /etc/azure-npm
+          name: azure-npm-config
+      hostNetwork: true
+      priorityClassName: system-node-critical
+      serviceAccountName: azure-npm
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
+      - key: CriticalAddonsOnly
+
operator: Exists + volumes: + - hostPath: + path: /var/log + type: Directory + name: log + - hostPath: + path: /run/xtables.lock + type: File + name: xtables-lock + - hostPath: + path: /etc/protocols + type: File + name: protocols + - configMap: + name: azure-npm-config + name: azure-npm-config diff --git a/npm/deploy/npm/azure-npm.yaml b/npm/deploy/npm/azure-npm.yaml new file mode 100644 index 000000000..bf4be6d67 --- /dev/null +++ b/npm/deploy/npm/azure-npm.yaml @@ -0,0 +1,164 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: log + mountPath: /var/log + - name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + hostNetwork: true + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local", + 
"Port": 19002, + "ServicePort": 9001 + } + } diff --git a/npm/examples/windows/azure-npm.yaml b/npm/examples/windows/azure-npm.yaml index 3b7af924a..d892e95bb 100644 --- a/npm/examples/windows/azure-npm.yaml +++ b/npm/examples/windows/azure-npm.yaml @@ -143,6 +143,11 @@ data: "EnableV2NPM": true, "PlaceAzureChainFirst": false, "ApplyIPSetsOnNeed": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 10092, + "ServicePort": 9001 } } diff --git a/npm/npm.go b/npm/npm.go index bcb3d7354..e116a99fa 100644 --- a/npm/npm.go +++ b/npm/npm.go @@ -5,13 +5,13 @@ package npm import ( "encoding/json" "fmt" - "os" npmconfig "github.com/Azure/azure-container-networking/npm/config" "github.com/Azure/azure-container-networking/npm/ipsm" controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1" controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/informers" @@ -20,7 +20,30 @@ import ( utilexec "k8s.io/utils/exec" ) -var ErrDataplaneNotInitialized = errors.New("dataplane is not initialized") +var aiMetadata string //nolint // aiMetadata is set in Makefile + +// NetworkPolicyManager contains informers for pod, namespace and networkpolicy. +type NetworkPolicyManager struct { + config npmconfig.Config + + // ipsMgr are shared in all controllers. Thus, only one ipsMgr is created for simple management + // and uses lock to avoid unintentional race condictions in IpsetManager. + ipsMgr *ipsm.IpsetManager + + // Informers are the Kubernetes Informer + // https://pkg.go.dev/k8s.io/client-go/informers + models.Informers + + // Legacy controllers for handling Kubernetes resource watcher events + // To be deprecated + models.K8SControllersV1 + + // Controllers for handling Kubernetes resource watcher events + models.K8SControllersV2 + + // Azure-specific variables + models.AzureConfig +} // NewNetworkPolicyManager creates a NetworkPolicyManager func NewNetworkPolicyManager(config npmconfig.Config, @@ -33,93 +56,93 @@ func NewNetworkPolicyManager(config npmconfig.Config, npMgr := &NetworkPolicyManager{ config: config, - Informers: Informers{ - informerFactory: informerFactory, - podInformer: informerFactory.Core().V1().Pods(), - nsInformer: informerFactory.Core().V1().Namespaces(), - npInformer: informerFactory.Networking().V1().NetworkPolicies(), + Informers: models.Informers{ + InformerFactory: informerFactory, + PodInformer: informerFactory.Core().V1().Pods(), + NsInformer: informerFactory.Core().V1().Namespaces(), + NpInformer: informerFactory.Networking().V1().NetworkPolicies(), }, - AzureConfig: AzureConfig{ - k8sServerVersion: k8sServerVersion, - NodeName: GetNodeName(), - version: npmVersion, + AzureConfig: models.AzureConfig{ + K8sServerVersion: k8sServerVersion, + NodeName: models.GetNodeName(), + Version: npmVersion, TelemetryEnabled: true, }, } // create v2 NPM specific components. 
if npMgr.config.Toggles.EnableV2NPM { - npMgr.npmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} - npMgr.podControllerV2 = controllersv2.NewPodController(npMgr.podInformer, dp, npMgr.npmNamespaceCacheV2) - npMgr.namespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.nsInformer, dp, npMgr.npmNamespaceCacheV2) + npMgr.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} + npMgr.PodControllerV2 = controllersv2.NewPodController(npMgr.PodInformer, dp, npMgr.NpmNamespaceCacheV2) + npMgr.NamespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.NsInformer, dp, npMgr.NpmNamespaceCacheV2) // Question(jungukcho): Is config.Toggles.PlaceAzureChainFirst needed for v2? - npMgr.netPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.npInformer, dp) + npMgr.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.NpInformer, dp) return npMgr } // create v1 NPM specific components. npMgr.ipsMgr = ipsm.NewIpsetManager(exec) - npMgr.npmNamespaceCacheV1 = &controllersv1.NpmNamespaceCache{NsMap: make(map[string]*controllersv1.Namespace)} - npMgr.podControllerV1 = controllersv1.NewPodController(npMgr.podInformer, npMgr.ipsMgr, npMgr.npmNamespaceCacheV1) - npMgr.namespaceControllerV1 = controllersv1.NewNameSpaceController(npMgr.nsInformer, npMgr.ipsMgr, npMgr.npmNamespaceCacheV1) - npMgr.netPolControllerV1 = controllersv1.NewNetworkPolicyController(npMgr.npInformer, npMgr.ipsMgr, config.Toggles.PlaceAzureChainFirst) + npMgr.NpmNamespaceCacheV1 = &controllersv1.NpmNamespaceCache{NsMap: make(map[string]*controllersv1.Namespace)} + npMgr.PodControllerV1 = controllersv1.NewPodController(npMgr.PodInformer, npMgr.ipsMgr, npMgr.NpmNamespaceCacheV1) + npMgr.NamespaceControllerV1 = controllersv1.NewNameSpaceController(npMgr.NsInformer, npMgr.ipsMgr, npMgr.NpmNamespaceCacheV1) + npMgr.NetPolControllerV1 = controllersv1.NewNetworkPolicyController(npMgr.NpInformer, npMgr.ipsMgr, config.Toggles.PlaceAzureChainFirst) return npMgr } func (npMgr *NetworkPolicyManager) MarshalJSON() ([]byte, error) { - m := map[CacheKey]json.RawMessage{} + m := map[models.CacheKey]json.RawMessage{} var npmNamespaceCacheRaw []byte var err error if npMgr.config.Toggles.EnableV2NPM { - npmNamespaceCacheRaw, err = json.Marshal(npMgr.npmNamespaceCacheV2) + npmNamespaceCacheRaw, err = json.Marshal(npMgr.NpmNamespaceCacheV2) } else { - npmNamespaceCacheRaw, err = json.Marshal(npMgr.npmNamespaceCacheV1) + npmNamespaceCacheRaw, err = json.Marshal(npMgr.NpmNamespaceCacheV1) } if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NsMap] = npmNamespaceCacheRaw + m[models.NsMap] = npmNamespaceCacheRaw var podControllerRaw []byte if npMgr.config.Toggles.EnableV2NPM { - podControllerRaw, err = json.Marshal(npMgr.podControllerV2) + podControllerRaw, err = json.Marshal(npMgr.PodControllerV2) } else { - podControllerRaw, err = json.Marshal(npMgr.podControllerV1) + podControllerRaw, err = json.Marshal(npMgr.PodControllerV1) } if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[PodMap] = podControllerRaw + m[models.PodMap] = podControllerRaw // TODO(jungukcho): NPM debug may be broken. // Will fix it later after v2 controller and linux test if it is broken. 
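+	// The list and set maps below come from the v1 ipset manager, so they are
+	// only marshaled when V2 NPM is disabled and the ipset manager exists.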
if !npMgr.config.Toggles.EnableV2NPM && npMgr.ipsMgr != nil { listMapRaw, listMapMarshalErr := npMgr.ipsMgr.MarshalListMapJSON() if listMapMarshalErr != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, listMapMarshalErr) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, listMapMarshalErr) } - m[ListMap] = listMapRaw + m[models.ListMap] = listMapRaw setMapRaw, setMapMarshalErr := npMgr.ipsMgr.MarshalSetMapJSON() if setMapMarshalErr != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, setMapMarshalErr) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, setMapMarshalErr) } - m[SetMap] = setMapRaw + m[models.SetMap] = setMapRaw } nodeNameRaw, err := json.Marshal(npMgr.NodeName) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NodeName] = nodeNameRaw + m[models.NodeName] = nodeNameRaw npmCacheRaw, err := json.Marshal(m) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } return npmCacheRaw, nil @@ -127,47 +150,47 @@ func (npMgr *NetworkPolicyManager) MarshalJSON() ([]byte, error) { // GetAppVersion returns network policy manager app version func (npMgr *NetworkPolicyManager) GetAppVersion() string { - return npMgr.version + return npMgr.Version } // Start starts shared informers and waits for the shared informer cache to sync. func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan struct{}) error { if !config.Toggles.EnableV2NPM { // Do initialization of data plane before starting syncup of each controller to avoid heavy call to api-server - if err := npMgr.netPolControllerV1.ResetDataPlane(); err != nil { + if err := npMgr.NetPolControllerV1.ResetDataPlane(); err != nil { return fmt.Errorf("Failed to initialized data plane with err %w", err) } } // Starts all informers manufactured by npMgr's informerFactory. - npMgr.informerFactory.Start(stopCh) + npMgr.InformerFactory.Start(stopCh) // Wait for the initial sync of local cache. 
- if !cache.WaitForCacheSync(stopCh, npMgr.podInformer.Informer().HasSynced) { - return fmt.Errorf("Pod informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, npMgr.PodInformer.Informer().HasSynced) { + return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, npMgr.nsInformer.Informer().HasSynced) { - return fmt.Errorf("Namespace informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, npMgr.NsInformer.Informer().HasSynced) { + return fmt.Errorf("Namespace informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, npMgr.npInformer.Informer().HasSynced) { - return fmt.Errorf("NetworkPolicy informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, npMgr.NpInformer.Informer().HasSynced) { + return fmt.Errorf("NetworkPolicy informer error: %w", models.ErrInformerSyncFailure) } // start v2 NPM controllers after synced if config.Toggles.EnableV2NPM { - go npMgr.podControllerV2.Run(stopCh) - go npMgr.namespaceControllerV2.Run(stopCh) - go npMgr.netPolControllerV2.Run(stopCh) + go npMgr.PodControllerV2.Run(stopCh) + go npMgr.NamespaceControllerV2.Run(stopCh) + go npMgr.NetPolControllerV2.Run(stopCh) return nil } // start v1 NPM controllers after synced - go npMgr.podControllerV1.Run(stopCh) - go npMgr.namespaceControllerV1.Run(stopCh) - go npMgr.netPolControllerV1.Run(stopCh) - go npMgr.netPolControllerV1.RunPeriodicTasks(stopCh) + go npMgr.PodControllerV1.Run(stopCh) + go npMgr.NamespaceControllerV1.Run(stopCh) + go npMgr.NetPolControllerV1.Run(stopCh) + go npMgr.NetPolControllerV1.RunPeriodicTasks(stopCh) return nil } @@ -176,8 +199,3 @@ func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan func GetAIMetadata() string { return aiMetadata } - -func GetNodeName() string { - nodeName := os.Getenv(EnvNodeName) - return nodeName -} diff --git a/npm/consts.go b/npm/pkg/models/consts.go similarity index 76% rename from npm/consts.go rename to npm/pkg/models/consts.go index 9202eaf8d..403bfb9c9 100644 --- a/npm/consts.go +++ b/npm/pkg/models/consts.go @@ -1,4 +1,6 @@ -package npm +package models + +import "os" const ( heartbeatIntervalInMinutes = 30 //nolint:unused,deadcode,varcheck // ignore this error @@ -14,3 +16,8 @@ const ( EnvNodeName = "HOSTNAME" ) + +func GetNodeName() string { + nodeName := os.Getenv(EnvNodeName) + return nodeName +} diff --git a/npm/pkg/models/types.go b/npm/pkg/models/types.go new file mode 100644 index 000000000..28d3ffb80 --- /dev/null +++ b/npm/pkg/models/types.go @@ -0,0 +1,53 @@ +// Copyright 2018 Microsoft. All rights reserved. 
+// MIT License
+package models
+
+import (
+	controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1"
+	controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/informers"
+	coreinformers "k8s.io/client-go/informers/core/v1"
+	networkinginformers "k8s.io/client-go/informers/networking/v1"
+)
+
+var (
+	ErrMarshalNPMCache     = errors.New("failed to marshal NPM Cache")
+	ErrInformerSyncFailure = errors.New("informer sync failure")
+)
+
+// CacheKey is the cache lookup key for the NPM cache
+type CacheKey string
+
+// K8SControllersV1 are the legacy k8s controllers
+type K8SControllersV1 struct {
+	PodControllerV1       *controllersv1.PodController           //nolint:structcheck //ignore this error
+	NamespaceControllerV1 *controllersv1.NamespaceController     //nolint:structcheck // false lint error
+	NpmNamespaceCacheV1   *controllersv1.NpmNamespaceCache       //nolint:structcheck // false lint error
+	NetPolControllerV1    *controllersv1.NetworkPolicyController //nolint:structcheck // false lint error
+}
+
+// K8SControllersV2 are the optimized k8s controllers that replace the legacy controllers
+type K8SControllersV2 struct {
+	PodControllerV2       *controllersv2.PodController           //nolint:structcheck //ignore this error
+	NamespaceControllerV2 *controllersv2.NamespaceController     //nolint:structcheck // false lint error
+	NpmNamespaceCacheV2   *controllersv2.NpmNamespaceCache       //nolint:structcheck // false lint error
+	NetPolControllerV2    *controllersv2.NetworkPolicyController //nolint:structcheck // false lint error
+}
+
+// Informers are the informers for the k8s controllers
+type Informers struct {
+	InformerFactory informers.SharedInformerFactory           //nolint:structcheck //ignore this error
+	PodInformer     coreinformers.PodInformer                 //nolint:structcheck // false lint error
+	NsInformer      coreinformers.NamespaceInformer           //nolint:structcheck // false lint error
+	NpInformer      networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error
+}
+
+// AzureConfig captures the Azure specific configurations and fields
+type AzureConfig struct {
+	K8sServerVersion *version.Info
+	NodeName         string
+	Version          string
+	TelemetryEnabled bool
+}
diff --git a/npm/pkg/transport/events_client.go b/npm/pkg/transport/events_client.go
index 481b1ed4f..5e91eea9e 100644
--- a/npm/pkg/transport/events_client.go
+++ b/npm/pkg/transport/events_client.go
@@ -35,7 +35,9 @@ func NewEventsClient(ctx context.Context, pod, node, addr string) (*EventsClient
 		return nil, ErrAddressNil
 	}
 
+	klog.Infof("Connecting to NPM controller gRPC server at address %s", addr)
 	// TODO Make this secure
+	// TODO Remove WithBlock option post testing
 	cc, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithBlock())
 	if err != nil {
 		return nil, fmt.Errorf("failed to dial %s: %w", addr, err)
diff --git a/npm/pkg/transport/events_server.go b/npm/pkg/transport/events_server.go
index 714560bb4..3361531bb 100644
--- a/npm/pkg/transport/events_server.go
+++ b/npm/pkg/transport/events_server.go
@@ -134,8 +134,8 @@ func (m *EventsServer) start(stopCh <-chan struct{}) error {
 }
 
 func (m *EventsServer) handle() error {
-	klog.Info("Starting transport manager listener")
-	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", m.port))
+	klog.Infof("Starting transport manager listener on port %v", m.port)
+	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", m.port))
 	if err != nil {
 		return fmt.Errorf("failed to handle server
connections: %w", err) } diff --git a/npm/types.go b/npm/types.go deleted file mode 100644 index 1de1e025d..000000000 --- a/npm/types.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018 Microsoft. All rights reserved. -// MIT License -package npm - -import ( - npmconfig "github.com/Azure/azure-container-networking/npm/config" - "github.com/Azure/azure-container-networking/npm/ipsm" - controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1" - controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/informers" - coreinformers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1" -) - -var ( - aiMetadata string - errMarshalNPMCache = errors.New("failed to marshal NPM Cache") -) - -// NetworkPolicyManager contains informers for pod, namespace and networkpolicy. -type NetworkPolicyManager struct { - config npmconfig.Config - - // ipsMgr are shared in all controllers. Thus, only one ipsMgr is created for simple management - // and uses lock to avoid unintentional race condictions in IpsetManager. - ipsMgr *ipsm.IpsetManager - - // Informers are the Kubernetes Informer - // https://pkg.go.dev/k8s.io/client-go/informers - Informers - - // Legacy controllers for handling Kubernetes resource watcher events - // To be deprecated - K8SControllersV1 - - // Controllers for handling Kubernetes resource watcher events - K8SControllersV2 - - // Azure-specific variables - AzureConfig -} - -// Cache is the cache lookup key for the NPM cache -type CacheKey string - -// K8SControllerV1 are the legacy k8s controllers -type K8SControllersV1 struct { - podControllerV1 *controllersv1.PodController //nolint:structcheck //ignore this error - namespaceControllerV1 *controllersv1.NamespaceController //nolint:structcheck // false lint error - npmNamespaceCacheV1 *controllersv1.NpmNamespaceCache //nolint:structcheck // false lint error - netPolControllerV1 *controllersv1.NetworkPolicyController //nolint:structcheck // false lint error -} - -// K8SControllerV2 are the optimized k8s controllers that replace the legacy controllers -type K8SControllersV2 struct { - podControllerV2 *controllersv2.PodController //nolint:structcheck //ignore this error - namespaceControllerV2 *controllersv2.NamespaceController //nolint:structcheck // false lint error - npmNamespaceCacheV2 *controllersv2.NpmNamespaceCache //nolint:structcheck // false lint error - netPolControllerV2 *controllersv2.NetworkPolicyController //nolint:structcheck // false lint error -} - -// Informers are the informers for the k8s controllers -type Informers struct { - informerFactory informers.SharedInformerFactory //nolint:structcheck //ignore this error - podInformer coreinformers.PodInformer //nolint:structcheck // false lint error - nsInformer coreinformers.NamespaceInformer //nolint:structcheck // false lint error - npInformer networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error -} - -// AzureConfig captures the Azure specific configurations and fields -type AzureConfig struct { - k8sServerVersion *version.Info - NodeName string - version string - TelemetryEnabled bool -}