feat: [NPM] Restructure code and add deploy manifests (#1203)
This commit is contained in:
Parent: 580c3e4072
Commit: d3aeda737e
@ -0,0 +1,11 @@
allow_k8s_contexts(k8s_context())
default_registry('ttl.sh/nitishm-12390')
docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = {
    "VERSION": "v1.4.14-101-gf900e319-dirty",
    "NPM_AI_PATH": "github.com/Azure/azure-container-networking/npm.aiMetadata",
    "NPM_AI_ID": "014c22bd-4107-459e-8475-67909e96edcb"
})
# watch_file('npm')
k8s_yaml('npm/deploy/manifests/controller/azure-npm.yaml')
k8s_yaml('npm/deploy/manifests/daemon/azure-npm.yaml', allow_duplicates=True)
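For local iteration against this Tiltfile, a typical loop (assuming Tilt is installed and the current kubectl context points at a disposable dev cluster; `ttl.sh/nitishm-12390` above is a personal scratch registry) is:

```
tilt up    # build azure-npm via docker_build and apply both k8s_yaml manifests
tilt down  # remove the deployed resources when done
```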
@ -12,7 +12,7 @@ Azure-NPM serves as a distributed firewall for the Kubernetes cluster, and it ca
Running the command below will bring up one azure-npm instance on each Kubernetes node.
```
- kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml
+ kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/deploy/npm/azure-npm.yaml
```
Now you can secure your Kubernetes cluster with Azure-NPM by applying Kubernetes network policies.
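With NPM running, policies take effect as soon as they are created. As a minimal illustration (not part of this commit; the `demo` namespace is a placeholder), a default deny-all ingress policy looks like:

```yaml
# Illustrative only: denies all inbound traffic to every pod in the namespace.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: demo
spec:
  podSelector: {}   # empty selector matches all pods in the namespace
  policyTypes:
    - Ingress       # no ingress rules are listed, so nothing is allowed in
```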
@ -97,10 +97,10 @@ spec:
        - name: NPM_CONFIG
          value: /etc/azure-npm/azure-npm.json
        volumeMounts:
-       - name: xtables-lock
-         mountPath: /run/xtables.lock
        - name: log
          mountPath: /var/log
+       - name: xtables-lock
+         mountPath: /run/xtables.lock
        - name: protocols
          mountPath: /etc/protocols
        - name: azure-npm-config
@ -14,7 +14,13 @@ func NewRootCmd() *cobra.Command {
		},
	}

-	rootCmd.AddCommand(newStartNPMCmd())
+	startCmd := newStartNPMCmd()
+
+	startCmd.AddCommand(newStartNPMControlplaneCmd())
+	startCmd.AddCommand(newStartNPMDaemonCmd())
+
+	rootCmd.AddCommand(startCmd)

	rootCmd.AddCommand(newDebugCmd())

	return rootCmd
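The effect of this restructuring is that the decomposed-mode entry points now hang off `start` on the root command; the resulting invocations (matching the `args` in the manifests below) are:

```
azure-npm start                # classic NPM: controller and datapath in one process
azure-npm start controlplane   # decomposed mode: controlplane component only
azure-npm start daemon         # decomposed mode: per-node datapath component only
```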
@ -18,6 +18,7 @@ import (
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane/ipsets"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane/policies"
+	"github.com/Azure/azure-container-networking/npm/pkg/models"
	"github.com/Azure/azure-container-networking/npm/util"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
@ -85,11 +86,6 @@ func newStartNPMCmd() *cobra.Command {
	startNPMCmd.Flags().String(flagKubeConfigPath, flagDefaults[flagKubeConfigPath], "path to kubeconfig")

-	// The controlplane subcommand starts the NPM controller's controlplane component in the decomposed mode
-	startNPMCmd.AddCommand(newStartNPMControlplaneCmd())
-	// The daemon subcommand starts the NPM controller's datapath component in the daemon mode
-	startNPMCmd.AddCommand(newStartNPMDaemonCmd())

	return startNPMCmd
}
@ -149,7 +145,7 @@ func start(config npmconfig.Config, flags npmconfig.Flags) error {
	} else {
		npmV2DataplaneCfg.IPSetMode = ipsets.ApplyAllIPSets
	}
-	dp, err = dataplane.NewDataPlane(npm.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, stopChannel)
+	dp, err = dataplane.NewDataPlane(models.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, stopChannel)
	if err != nil {
		return fmt.Errorf("failed to create dataplane with error %w", err)
	}
@ -7,10 +7,11 @@ import (
	"strconv"

	"github.com/Azure/azure-container-networking/common"
-	"github.com/Azure/azure-container-networking/npm"
	npmconfig "github.com/Azure/azure-container-networking/npm/config"
+	"github.com/Azure/azure-container-networking/npm/daemon"
	"github.com/Azure/azure-container-networking/npm/pkg/controlplane/goalstateprocessor"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane"
+	"github.com/Azure/azure-container-networking/npm/pkg/models"
	"github.com/Azure/azure-container-networking/npm/pkg/transport"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
@ -48,7 +49,7 @@ func startDaemon(config npmconfig.Config) error {
	pod := os.Getenv(podNameEnv)
	node := os.Getenv(nodeNameEnv)

-	addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.Port)
+	addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.ServicePort)
	ctx := context.Background()
	err := initLogging()
	if err != nil {
@ -58,7 +59,7 @@ func startDaemon(config npmconfig.Config) error {

	var dp dataplane.GenericDataplane

-	dp, err = dataplane.NewDataPlane(npm.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, wait.NeverStop)
+	dp, err = dataplane.NewDataPlane(models.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, wait.NeverStop)
	if err != nil {
		klog.Errorf("failed to create dataplane: %v", err)
		return fmt.Errorf("failed to create dataplane with error %w", err)
@ -76,7 +77,7 @@ func startDaemon(config npmconfig.Config) error {
		return fmt.Errorf("failed to create goalstate processor: %w", err)
	}

-	n, err := npm.NewNetworkPolicyDaemon(ctx, config, dp, gsp, client, version)
+	n, err := daemon.NewNetworkPolicyDaemon(ctx, config, dp, gsp, client, version)
	if err != nil {
		klog.Errorf("failed to create dataplane : %v", err)
		return fmt.Errorf("failed to create dataplane: %w", err)
@ -8,6 +8,7 @@ import (

	"github.com/Azure/azure-container-networking/npm"
	npmconfig "github.com/Azure/azure-container-networking/npm/config"
+	"github.com/Azure/azure-container-networking/npm/controller"
	restserver "github.com/Azure/azure-container-networking/npm/http/server"
	"github.com/Azure/azure-container-networking/npm/metrics"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane"
@ -105,7 +106,7 @@ func startControlplane(config npmconfig.Config, flags npmconfig.Flags) error {
		return fmt.Errorf("failed to create dataplane with error: %w", err)
	}

-	npMgr, err := npm.NewNetworkPolicyServer(config, factory, mgr, dp, version, k8sServerVersion)
+	npMgr, err := controller.NewNetworkPolicyServer(config, factory, mgr, dp, version, k8sServerVersion)
	if err != nil {
		klog.Errorf("failed to create NPM controlplane manager with error: %v", err)
		return fmt.Errorf("failed to create NPM controlplane manager: %w", err)
@ -1,9 +1,10 @@
package npmconfig

const (
-	defaultResyncPeriod  = 15
-	defaultListeningPort = 10091
-	defaultGrpcPort      = 10092
+	defaultResyncPeriod    = 15
+	defaultListeningPort   = 10091
+	defaultGrpcPort        = 10092
+	defaultGrpcServicePort = 9002
	// ConfigEnvPath is what's used by viper to load config path
	ConfigEnvPath = "NPM_CONFIG"
)
@ -16,8 +17,9 @@ var DefaultConfig = Config{
	ListeningAddress: "0.0.0.0",

	Transport: GrpcServerConfig{
-		Address: "0.0.0.0",
-		Port:    defaultGrpcPort,
+		Address:     "0.0.0.0",
+		Port:        defaultGrpcPort,
+		ServicePort: defaultGrpcServicePort,
	},

	Toggles: Toggles{
@ -35,6 +37,8 @@ type GrpcServerConfig struct {
	Address string `json:"Address,omitempty"`
	// Port is the port on which the gRPC server will listen
	Port int `json:"Port,omitempty"`
+	// ServicePort is the service port for the client to connect to the gRPC server
+	ServicePort int `json:"ServicePort,omitempty"`
}

type Config struct {
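`Port` is what the controlplane pod itself listens on, while `ServicePort` is the port the Kubernetes Service exposes for daemons to dial. The ConfigMaps in this commit wire the two together like this (fragment reproduced from the manifests below):

```json
{
  "Transport": {
    "Address": "azure-npm.kube-system.svc.cluster.local",
    "Port": 10092,
    "ServicePort": 9001
  }
}
```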
@ -1,6 +1,6 @@
// Copyright 2018 Microsoft. All rights reserved.
// MIT License
-package npm
+package controller

import (
	"encoding/json"
@ -9,6 +9,7 @@ import (
	npmconfig "github.com/Azure/azure-container-networking/npm/config"
	controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane"
+	"github.com/Azure/azure-container-networking/npm/pkg/models"
	"github.com/Azure/azure-container-networking/npm/pkg/transport"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/version"
@ -17,6 +18,8 @@ import (
	"k8s.io/klog"
)

+var aiMetadata string //nolint // aiMetadata is set in Makefile
+
type NetworkPolicyServer struct {
	config npmconfig.Config
@ -25,20 +28,20 @@ type NetworkPolicyServer struct {

	// Informers are the Kubernetes Informer
	// https://pkg.go.dev/k8s.io/client-go/informers
-	Informers
+	models.Informers

	// Controllers for handling Kubernetes resource watcher events
-	K8SControllersV2
+	models.K8SControllersV2

	// Azure-specific variables
-	AzureConfig
+	models.AzureConfig
}

var (
-	ErrInformerFactoryNil  = errors.New("informer factory is nil")
-	ErrTransportManagerNil = errors.New("transport manager is nil")
-	ErrK8SServerVersionNil = errors.New("k8s server version is nil")
-	ErrInformerSyncFailure = errors.New("informer sync failure")
+	ErrInformerFactoryNil      = errors.New("informer factory is nil")
+	ErrTransportManagerNil     = errors.New("transport manager is nil")
+	ErrK8SServerVersionNil     = errors.New("k8s server version is nil")
+	ErrDataplaneNotInitialized = errors.New("dataplane is not initialized")
)

func NewNetworkPolicyServer(
@ -70,87 +73,87 @@ func NewNetworkPolicyServer(
	n := &NetworkPolicyServer{
		config: config,
		tm:     mgr,
-		Informers: Informers{
-			informerFactory: informerFactory,
-			podInformer:     informerFactory.Core().V1().Pods(),
-			nsInformer:      informerFactory.Core().V1().Namespaces(),
-			npInformer:      informerFactory.Networking().V1().NetworkPolicies(),
+		Informers: models.Informers{
+			InformerFactory: informerFactory,
+			PodInformer:     informerFactory.Core().V1().Pods(),
+			NsInformer:      informerFactory.Core().V1().Namespaces(),
+			NpInformer:      informerFactory.Networking().V1().NetworkPolicies(),
		},
-		AzureConfig: AzureConfig{
-			k8sServerVersion: k8sServerVersion,
-			NodeName:         GetNodeName(),
-			version:          npmVersion,
+		AzureConfig: models.AzureConfig{
+			K8sServerVersion: k8sServerVersion,
+			NodeName:         models.GetNodeName(),
+			Version:          npmVersion,
			TelemetryEnabled: true,
		},
	}

-	n.npmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)}
-	n.podControllerV2 = controllersv2.NewPodController(n.podInformer, dp, n.npmNamespaceCacheV2)
-	n.namespaceControllerV2 = controllersv2.NewNamespaceController(n.nsInformer, dp, n.npmNamespaceCacheV2)
-	n.netPolControllerV2 = controllersv2.NewNetworkPolicyController(n.npInformer, dp)
+	n.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)}
+	n.PodControllerV2 = controllersv2.NewPodController(n.PodInformer, dp, n.NpmNamespaceCacheV2)
+	n.NamespaceControllerV2 = controllersv2.NewNamespaceController(n.NsInformer, dp, n.NpmNamespaceCacheV2)
+	n.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(n.NpInformer, dp)

	return n, nil
}

func (n *NetworkPolicyServer) MarshalJSON() ([]byte, error) {
-	m := map[CacheKey]json.RawMessage{}
+	m := map[models.CacheKey]json.RawMessage{}

	var npmNamespaceCacheRaw []byte
	var err error
-	npmNamespaceCacheRaw, err = json.Marshal(n.npmNamespaceCacheV2)
+	npmNamespaceCacheRaw, err = json.Marshal(n.NpmNamespaceCacheV2)

	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}
-	m[NsMap] = npmNamespaceCacheRaw
+	m[models.NsMap] = npmNamespaceCacheRaw

	var podControllerRaw []byte
-	podControllerRaw, err = json.Marshal(n.podControllerV2)
+	podControllerRaw, err = json.Marshal(n.PodControllerV2)

	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}
-	m[PodMap] = podControllerRaw
+	m[models.PodMap] = podControllerRaw

	nodeNameRaw, err := json.Marshal(n.NodeName)
	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}
-	m[NodeName] = nodeNameRaw
+	m[models.NodeName] = nodeNameRaw

	npmCacheRaw, err := json.Marshal(m)
	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}

	return npmCacheRaw, nil
}

func (n *NetworkPolicyServer) GetAppVersion() string {
-	return n.version
+	return n.Version
}

func (n *NetworkPolicyServer) Start(config npmconfig.Config, stopCh <-chan struct{}) error {
-	// Starts all informers manufactured by n's informerFactory.
-	n.informerFactory.Start(stopCh)
+	// Starts all informers manufactured by n's InformerFactory.
+	n.InformerFactory.Start(stopCh)

	// Wait for the initial sync of local cache.
-	if !cache.WaitForCacheSync(stopCh, n.podInformer.Informer().HasSynced) {
-		return fmt.Errorf("Pod informer error: %w", ErrInformerSyncFailure)
+	if !cache.WaitForCacheSync(stopCh, n.PodInformer.Informer().HasSynced) {
+		return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure)
	}

-	if !cache.WaitForCacheSync(stopCh, n.nsInformer.Informer().HasSynced) {
-		return fmt.Errorf("Namespace informer error: %w", ErrInformerSyncFailure)
+	if !cache.WaitForCacheSync(stopCh, n.NsInformer.Informer().HasSynced) {
+		return fmt.Errorf("Namespace informer error: %w", models.ErrInformerSyncFailure)
	}

-	if !cache.WaitForCacheSync(stopCh, n.npInformer.Informer().HasSynced) {
-		return fmt.Errorf("NetworkPolicy informer error: %w", ErrInformerSyncFailure)
+	if !cache.WaitForCacheSync(stopCh, n.NpInformer.Informer().HasSynced) {
+		return fmt.Errorf("NetworkPolicy informer error: %w", models.ErrInformerSyncFailure)
	}

	// start v2 NPM controllers after synced
-	go n.podControllerV2.Run(stopCh)
-	go n.namespaceControllerV2.Run(stopCh)
-	go n.netPolControllerV2.Run(stopCh)
+	go n.PodControllerV2.Run(stopCh)
+	go n.NamespaceControllerV2.Run(stopCh)
+	go n.NetPolControllerV2.Run(stopCh)

	// start the transport layer (gRPC) server
	// We block the main thread here until the server is stopped.
@ -1,9 +1,10 @@
// Copyright 2018 Microsoft. All rights reserved.
// MIT License
-package npm
+package daemon

import (
	"context"
	"errors"
	"fmt"

	npmconfig "github.com/Azure/azure-container-networking/npm/config"

@ -12,6 +13,10 @@ import (
	"github.com/Azure/azure-container-networking/npm/pkg/transport"
)

+var aiMetadata string //nolint // aiMetadata is set in Makefile
+
+var ErrDataplaneNotInitialized = errors.New("dataplane is not initialized")
+
type NetworkPolicyDaemon struct {
	ctx    context.Context
	config npmconfig.Config
@ -0,0 +1,36 @@
# Kustomize based deployment

## Prerequisites

- [Kustomize](https://kustomize.io/) - Follow the instructions below to install it.

```terminal
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
```

For other installation options refer to https://kubectl.docs.kubernetes.io/installation/kustomize.

To generate the resources for the **controller**, run the following command:

```terminal
kustomize build overlays/controller > /tmp/controller.yaml
```

To generate the resources for the **daemon**, run the following command:

```terminal
kustomize build overlays/daemon > /tmp/daemon.yaml
```

## Deploying to the cluster

> `kustomize` is not required for this step, since it is already bundled in the `kubectl` binary.

### NPM Controller

To deploy the controller to your cluster, run the following command:

```terminal
kubectl apply -k overlays/controller
```

### NPM Daemon

To deploy the daemon to your cluster, run the following command:

```terminal
kubectl apply -k overlays/daemon
```
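Before applying either overlay, the rendered resources can be previewed against the live cluster (assuming kubectl >= 1.14, whose `-k` flag uses the bundled kustomize):

```terminal
kubectl diff -k overlays/controller
kubectl diff -k overlays/daemon
```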
@ -0,0 +1,25 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: azure-npm-config
  namespace: kube-system
data:
  azure-npm.json: |
    {
      "ResyncPeriodInMinutes": 15,
      "ListeningPort": 10091,
      "ListeningAddress": "0.0.0.0",
      "Toggles": {
        "EnablePrometheusMetrics": true,
        "EnablePprof": true,
        "EnableHTTPDebugAPI": true,
        "EnableV2NPM": false,
        "PlaceAzureChainFirst": false
      },
      "Transport": {
        "Address": "azure-npm.kube-system.svc.cluster.local",
        "Port": 10092,
        "ServicePort": 9001
      }
    }
@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - configmap.yaml
  - serviceaccount.yaml
  - rbac.yaml
@ -0,0 +1,44 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: azure-npm-binding
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
subjects:
  - kind: ServiceAccount
    name: azure-npm
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: azure-npm
  apiGroup: rbac.authorization.k8s.io
---
@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: azure-npm-controller
  namespace: kube-system
  labels:
    app: azure-npm
    component: controller
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  selector:
    matchLabels:
      k8s-app: azure-npm
      component: controller
  template:
    metadata:
      labels:
        k8s-app: azure-npm
        component: controller
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        azure.npm/scrapeable: ''
    spec:
      priorityClassName: system-node-critical
      tolerations:
        - operator: "Exists"
          effect: NoExecute
        - operator: "Exists"
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: azure-npm
          ports:
            - name: metrics
              containerPort: 10091
            - name: http
              containerPort: 10092
          image: azure-npm:v1.4.1
          command: ["azure-npm"]
          args: ["start", "controlplane"]
          resources:
            limits:
              cpu: 250m
              memory: 300Mi
            requests:
              cpu: 250m
          securityContext:
            privileged: true
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: NPM_CONFIG
              value: /etc/azure-npm/azure-npm.json
          volumeMounts:
            - name: log
              mountPath: /var/log
            - name: protocols
              mountPath: /etc/protocols
            - name: azure-npm-config
              mountPath: /etc/azure-npm
      volumes:
        - name: log
          hostPath:
            path: /var/log
            type: Directory
        - name: protocols
          hostPath:
            path: /etc/protocols
            type: File
        - name: azure-npm-config
          configMap:
            name: azure-npm-config
      serviceAccountName: azure-npm
@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
  - ../../base
resources:
  - deployment.yaml
  - service.yaml
@ -0,0 +1,34 @@
---
apiVersion: v1
kind: Service
metadata:
  name: npm-controller-metrics-cluster-service
  namespace: kube-system
  labels:
    app: azure-npm
    component: controller
spec:
  selector:
    k8s-app: azure-npm
    component: controller
  ports:
    - port: 9000
      name: metrics
      targetPort: 10091
---
apiVersion: v1
kind: Service
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    app: azure-npm
    component: controller
spec:
  selector:
    k8s-app: azure-npm
    component: controller
  ports:
    - name: http
      port: 9001
      targetPort: 10092
@ -0,0 +1,90 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: azure-npm-deamon
  namespace: kube-system
  labels:
    app: azure-npm
    component: daemon
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  selector:
    matchLabels:
      k8s-app: azure-npm
      component: daemon
  template:
    metadata:
      labels:
        k8s-app: azure-npm
        component: daemon
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        azure.npm/scrapeable: ''
    spec:
      priorityClassName: system-node-critical
      tolerations:
        - operator: "Exists"
          effect: NoExecute
        - operator: "Exists"
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: azure-npm
          ports:
            - name: metrics
              containerPort: 10091
          image: azure-npm:v1.4.1
          command: ["azure-npm"]
          args: ["start", "daemon"]
          resources:
            limits:
              cpu: 250m
              memory: 300Mi
            requests:
              cpu: 250m
          securityContext:
            privileged: true
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: NPM_CONFIG
              value: /etc/azure-npm/azure-npm.json
            - name: DEAMON_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: DEAMON_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: log
              mountPath: /var/log
            - name: xtables-lock
              mountPath: /run/xtables.lock
            - name: protocols
              mountPath: /etc/protocols
            - name: azure-npm-config
              mountPath: /etc/azure-npm
      hostNetwork: true
      volumes:
        - name: log
          hostPath:
            path: /var/log
            type: Directory
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: File
        - name: protocols
          hostPath:
            path: /etc/protocols
            type: File
        - name: azure-npm-config
          configMap:
            name: azure-npm-config
      serviceAccountName: azure-npm
@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
  - ../../base
resources:
  - deployment.yaml
  - service.yaml
@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: npm-deamon-metrics-cluster-service
  namespace: kube-system
  labels:
    app: azure-npm
    component: daemon
spec:
  selector:
    k8s-app: azure-npm
    component: daemon
  ports:
    - name: metrics
      port: 9000
      targetPort: 10091
@ -0,0 +1,24 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: azure-npm-config
  namespace: kube-system
data:
  azure-npm.json: |
    {
      "ResyncPeriodInMinutes": 15,
      "ListeningPort": 10091,
      "ListeningAddress": "0.0.0.0",
      "Toggles": {
        "EnablePrometheusMetrics": true,
        "EnablePprof": true,
        "EnableHTTPDebugAPI": true,
        "EnableV2NPM": false,
        "PlaceAzureChainFirst": false
      },
      "Transport": {
        "Address": "azure-npm.kube-system.svc.cluster.local",
        "Port": 10092
      }
    }
@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
@ -0,0 +1,44 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: azure-npm-binding
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
subjects:
  - kind: ServiceAccount
    name: azure-npm
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: azure-npm
  apiGroup: rbac.authorization.k8s.io
---
@ -0,0 +1,190 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: azure-npm
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: azure-npm
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: azure-npm-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: azure-npm
subjects:
  - kind: ServiceAccount
    name: azure-npm
    namespace: kube-system
---
apiVersion: v1
data:
  azure-npm.json: |
    {
      "ResyncPeriodInMinutes": 15,
      "ListeningPort": 10091,
      "ListeningAddress": "0.0.0.0",
      "Toggles": {
        "EnablePrometheusMetrics": true,
        "EnablePprof": true,
        "EnableHTTPDebugAPI": true,
        "EnableV2NPM": false,
        "PlaceAzureChainFirst": false
      },
      "Transport": {
        "Address": "azure-npm.kube-system.svc.cluster.local",
        "Port": 19002,
        "ServicePort": 9001
      }
    }
kind: ConfigMap
metadata:
  name: azure-npm-config
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: azure-npm
    component: controller
  name: azure-npm
  namespace: kube-system
spec:
  ports:
    - name: http
      port: 9001
      targetPort: 10092
  selector:
    component: controller
    k8s-app: azure-npm
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: azure-npm
    component: controller
  name: npm-controller-metrics-cluster-service
  namespace: kube-system
spec:
  ports:
    - name: metrics
      port: 9000
      targetPort: 10091
  selector:
    component: controller
    k8s-app: azure-npm
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
    app: azure-npm
    component: controller
  name: azure-npm-controller
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: controller
      k8s-app: azure-npm
  template:
    metadata:
      annotations:
        azure.npm/scrapeable: ""
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        component: controller
        k8s-app: azure-npm
    spec:
      containers:
        - args:
            - start
            - controlplane
          ports:
            - name: metrics
              containerPort: 10091
            - name: http
              containerPort: 10092
          command:
            - azure-npm
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: NPM_CONFIG
              value: /etc/azure-npm/azure-npm.json
          image: azure-npm:v1.4.1
          name: azure-npm
          resources:
            limits:
              cpu: 250m
              memory: 300Mi
            requests:
              cpu: 250m
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /var/log
              name: log
            - mountPath: /etc/protocols
              name: protocols
            - mountPath: /etc/azure-npm
              name: azure-npm-config
      priorityClassName: system-node-critical
      serviceAccountName: azure-npm
      tolerations:
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
      volumes:
        - hostPath:
            path: /var/log
            type: Directory
          name: log
        - hostPath:
            path: /etc/protocols
            type: File
          name: protocols
        - configMap:
            name: azure-npm-config
          name: azure-npm-config
@ -0,0 +1,186 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: azure-npm
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: azure-npm
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
  name: azure-npm-binding
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: azure-npm
subjects:
  - kind: ServiceAccount
    name: azure-npm
    namespace: kube-system
---
apiVersion: v1
data:
  azure-npm.json: |
    {
      "ResyncPeriodInMinutes": 15,
      "ListeningPort": 10091,
      "ListeningAddress": "0.0.0.0",
      "Toggles": {
        "EnablePrometheusMetrics": true,
        "EnablePprof": true,
        "EnableHTTPDebugAPI": true,
        "EnableV2NPM": false,
        "PlaceAzureChainFirst": false
      },
      "Transport": {
        "Address": "azure-npm.kube-system.svc.cluster.local",
        "Port": 10092,
        "ServicePort": 9001
      }
    }
kind: ConfigMap
metadata:
  name: azure-npm-config
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: azure-npm
    component: daemon
  name: npm-deamon-metrics-cluster-service
  namespace: kube-system
spec:
  ports:
    - name: metrics
      port: 9000
      targetPort: 10091
  selector:
    component: daemon
    k8s-app: azure-npm
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
    app: azure-npm
    component: daemon
  name: azure-npm-deamon
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: daemon
      k8s-app: azure-npm
  template:
    metadata:
      annotations:
        azure.npm/scrapeable: ""
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        component: daemon
        k8s-app: azure-npm
    spec:
      containers:
        - args:
            - start
            - daemon
          command:
            - azure-npm
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: NPM_CONFIG
              value: /etc/azure-npm/azure-npm.json
            - name: DAEMON_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: DAEMON_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          image: azure-npm:v1.4.1
          name: azure-npm
          ports:
            - name: metrics
              containerPort: 10091
          resources:
            limits:
              cpu: 250m
              memory: 300Mi
            requests:
              cpu: 250m
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /var/log
              name: log
            - mountPath: /run/xtables.lock
              name: xtables-lock
            - mountPath: /etc/protocols
              name: protocols
            - mountPath: /etc/azure-npm
              name: azure-npm-config
      hostNetwork: true
      priorityClassName: system-node-critical
      serviceAccountName: azure-npm
      tolerations:
        - effect: NoExecute
          operator: Exists
        - effect: NoSchedule
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
      volumes:
        - hostPath:
            path: /var/log
            type: Directory
          name: log
        - hostPath:
            path: /run/xtables.lock
            type: File
          name: xtables-lock
        - hostPath:
            path: /etc/protocols
            type: File
          name: protocols
        - configMap:
            name: azure-npm-config
          name: azure-npm-config
@ -0,0 +1,164 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: azure-npm-binding
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
subjects:
  - kind: ServiceAccount
    name: azure-npm
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: azure-npm
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: azure-npm
  namespace: kube-system
  labels:
    app: azure-npm
    addonmanager.kubernetes.io/mode: EnsureExists
spec:
  selector:
    matchLabels:
      k8s-app: azure-npm
  template:
    metadata:
      labels:
        k8s-app: azure-npm
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        azure.npm/scrapeable: ''
    spec:
      priorityClassName: system-node-critical
      tolerations:
        - operator: "Exists"
          effect: NoExecute
        - operator: "Exists"
          effect: NoSchedule
        - key: CriticalAddonsOnly
          operator: Exists
      containers:
        - name: azure-npm
          image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1
          resources:
            limits:
              cpu: 250m
              memory: 300Mi
            requests:
              cpu: 250m
          securityContext:
            privileged: true
          env:
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: NPM_CONFIG
              value: /etc/azure-npm/azure-npm.json
          volumeMounts:
            - name: log
              mountPath: /var/log
            - name: xtables-lock
              mountPath: /run/xtables.lock
            - name: protocols
              mountPath: /etc/protocols
            - name: azure-npm-config
              mountPath: /etc/azure-npm
      hostNetwork: true
      volumes:
        - name: log
          hostPath:
            path: /var/log
            type: Directory
        - name: xtables-lock
          hostPath:
            path: /run/xtables.lock
            type: File
        - name: protocols
          hostPath:
            path: /etc/protocols
            type: File
        - name: azure-npm-config
          configMap:
            name: azure-npm-config
      serviceAccountName: azure-npm
---
apiVersion: v1
kind: Service
metadata:
  name: npm-metrics-cluster-service
  namespace: kube-system
  labels:
    app: npm-metrics
spec:
  selector:
    k8s-app: azure-npm
  ports:
    - port: 9000
      targetPort: 10091
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: azure-npm-config
  namespace: kube-system
data:
  azure-npm.json: |
    {
      "ResyncPeriodInMinutes": 15,
      "ListeningPort": 10091,
      "ListeningAddress": "0.0.0.0",
      "Toggles": {
        "EnablePrometheusMetrics": true,
        "EnablePprof": true,
        "EnableHTTPDebugAPI": true,
        "EnableV2NPM": false,
        "PlaceAzureChainFirst": false
      },
      "Transport": {
        "Address": "azure-npm.kube-system.svc.cluster.local",
        "Port": 19002,
        "ServicePort": 9001
      }
    }
@ -143,6 +143,11 @@ data:
        "EnableV2NPM": true,
        "PlaceAzureChainFirst": false,
        "ApplyIPSetsOnNeed": false
      },
+     "Transport": {
+       "Address": "azure-npm.kube-system.svc.cluster.local",
+       "Port": 10092,
+       "ServicePort": 9001
+     }
    }
npm/npm.go
@ -5,13 +5,13 @@ package npm
import (
	"encoding/json"
	"fmt"
-	"os"

	npmconfig "github.com/Azure/azure-container-networking/npm/config"
	"github.com/Azure/azure-container-networking/npm/ipsm"
	controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1"
	controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2"
	"github.com/Azure/azure-container-networking/npm/pkg/dataplane"
+	"github.com/Azure/azure-container-networking/npm/pkg/models"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/informers"
@ -20,7 +20,30 @@ import (
	utilexec "k8s.io/utils/exec"
)

-var ErrDataplaneNotInitialized = errors.New("dataplane is not initialized")
+var aiMetadata string //nolint // aiMetadata is set in Makefile
+
+// NetworkPolicyManager contains informers for pod, namespace and networkpolicy.
+type NetworkPolicyManager struct {
+	config npmconfig.Config
+
+	// ipsMgr are shared in all controllers. Thus, only one ipsMgr is created for simple management
+	// and uses lock to avoid unintentional race conditions in IpsetManager.
+	ipsMgr *ipsm.IpsetManager
+
+	// Informers are the Kubernetes Informer
+	// https://pkg.go.dev/k8s.io/client-go/informers
+	models.Informers
+
+	// Legacy controllers for handling Kubernetes resource watcher events
+	// To be deprecated
+	models.K8SControllersV1
+
+	// Controllers for handling Kubernetes resource watcher events
+	models.K8SControllersV2
+
+	// Azure-specific variables
+	models.AzureConfig
+}

// NewNetworkPolicyManager creates a NetworkPolicyManager
func NewNetworkPolicyManager(config npmconfig.Config,
@ -33,93 +56,93 @@ func NewNetworkPolicyManager(config npmconfig.Config,

	npMgr := &NetworkPolicyManager{
		config: config,
-		Informers: Informers{
-			informerFactory: informerFactory,
-			podInformer:     informerFactory.Core().V1().Pods(),
-			nsInformer:      informerFactory.Core().V1().Namespaces(),
-			npInformer:      informerFactory.Networking().V1().NetworkPolicies(),
+		Informers: models.Informers{
+			InformerFactory: informerFactory,
+			PodInformer:     informerFactory.Core().V1().Pods(),
+			NsInformer:      informerFactory.Core().V1().Namespaces(),
+			NpInformer:      informerFactory.Networking().V1().NetworkPolicies(),
		},
-		AzureConfig: AzureConfig{
-			k8sServerVersion: k8sServerVersion,
-			NodeName:         GetNodeName(),
-			version:          npmVersion,
+		AzureConfig: models.AzureConfig{
+			K8sServerVersion: k8sServerVersion,
+			NodeName:         models.GetNodeName(),
+			Version:          npmVersion,
			TelemetryEnabled: true,
		},
	}

	// create v2 NPM specific components.
	if npMgr.config.Toggles.EnableV2NPM {
-		npMgr.npmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)}
-		npMgr.podControllerV2 = controllersv2.NewPodController(npMgr.podInformer, dp, npMgr.npmNamespaceCacheV2)
-		npMgr.namespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.nsInformer, dp, npMgr.npmNamespaceCacheV2)
+		npMgr.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)}
+		npMgr.PodControllerV2 = controllersv2.NewPodController(npMgr.PodInformer, dp, npMgr.NpmNamespaceCacheV2)
+		npMgr.NamespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.NsInformer, dp, npMgr.NpmNamespaceCacheV2)
		// Question(jungukcho): Is config.Toggles.PlaceAzureChainFirst needed for v2?
-		npMgr.netPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.npInformer, dp)
+		npMgr.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.NpInformer, dp)
		return npMgr
	}

	// create v1 NPM specific components.
	npMgr.ipsMgr = ipsm.NewIpsetManager(exec)

-	npMgr.npmNamespaceCacheV1 = &controllersv1.NpmNamespaceCache{NsMap: make(map[string]*controllersv1.Namespace)}
-	npMgr.podControllerV1 = controllersv1.NewPodController(npMgr.podInformer, npMgr.ipsMgr, npMgr.npmNamespaceCacheV1)
-	npMgr.namespaceControllerV1 = controllersv1.NewNameSpaceController(npMgr.nsInformer, npMgr.ipsMgr, npMgr.npmNamespaceCacheV1)
-	npMgr.netPolControllerV1 = controllersv1.NewNetworkPolicyController(npMgr.npInformer, npMgr.ipsMgr, config.Toggles.PlaceAzureChainFirst)
+	npMgr.NpmNamespaceCacheV1 = &controllersv1.NpmNamespaceCache{NsMap: make(map[string]*controllersv1.Namespace)}
+	npMgr.PodControllerV1 = controllersv1.NewPodController(npMgr.PodInformer, npMgr.ipsMgr, npMgr.NpmNamespaceCacheV1)
+	npMgr.NamespaceControllerV1 = controllersv1.NewNameSpaceController(npMgr.NsInformer, npMgr.ipsMgr, npMgr.NpmNamespaceCacheV1)
+	npMgr.NetPolControllerV1 = controllersv1.NewNetworkPolicyController(npMgr.NpInformer, npMgr.ipsMgr, config.Toggles.PlaceAzureChainFirst)
	return npMgr
}

func (npMgr *NetworkPolicyManager) MarshalJSON() ([]byte, error) {
-	m := map[CacheKey]json.RawMessage{}
+	m := map[models.CacheKey]json.RawMessage{}

	var npmNamespaceCacheRaw []byte
	var err error
	if npMgr.config.Toggles.EnableV2NPM {
-		npmNamespaceCacheRaw, err = json.Marshal(npMgr.npmNamespaceCacheV2)
+		npmNamespaceCacheRaw, err = json.Marshal(npMgr.NpmNamespaceCacheV2)
	} else {
-		npmNamespaceCacheRaw, err = json.Marshal(npMgr.npmNamespaceCacheV1)
+		npmNamespaceCacheRaw, err = json.Marshal(npMgr.NpmNamespaceCacheV1)
	}

	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}
-	m[NsMap] = npmNamespaceCacheRaw
+	m[models.NsMap] = npmNamespaceCacheRaw

	var podControllerRaw []byte
	if npMgr.config.Toggles.EnableV2NPM {
-		podControllerRaw, err = json.Marshal(npMgr.podControllerV2)
+		podControllerRaw, err = json.Marshal(npMgr.PodControllerV2)
	} else {
-		podControllerRaw, err = json.Marshal(npMgr.podControllerV1)
+		podControllerRaw, err = json.Marshal(npMgr.PodControllerV1)
	}

	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}
-	m[PodMap] = podControllerRaw
+	m[models.PodMap] = podControllerRaw

	// TODO(jungukcho): NPM debug may be broken.
	// Will fix it later after v2 controller and linux test if it is broken.
	if !npMgr.config.Toggles.EnableV2NPM && npMgr.ipsMgr != nil {
		listMapRaw, listMapMarshalErr := npMgr.ipsMgr.MarshalListMapJSON()
		if listMapMarshalErr != nil {
-			return nil, errors.Errorf("%s: %v", errMarshalNPMCache, listMapMarshalErr)
+			return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, listMapMarshalErr)
		}
-		m[ListMap] = listMapRaw
+		m[models.ListMap] = listMapRaw

		setMapRaw, setMapMarshalErr := npMgr.ipsMgr.MarshalSetMapJSON()
		if setMapMarshalErr != nil {
-			return nil, errors.Errorf("%s: %v", errMarshalNPMCache, setMapMarshalErr)
+			return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, setMapMarshalErr)
		}
-		m[SetMap] = setMapRaw
+		m[models.SetMap] = setMapRaw
	}

	nodeNameRaw, err := json.Marshal(npMgr.NodeName)
	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}
-	m[NodeName] = nodeNameRaw
+	m[models.NodeName] = nodeNameRaw

	npmCacheRaw, err := json.Marshal(m)
	if err != nil {
-		return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err)
+		return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err)
	}

	return npmCacheRaw, nil
@ -127,47 +150,47 @@ func (npMgr *NetworkPolicyManager) MarshalJSON() ([]byte, error) {

// GetAppVersion returns network policy manager app version
func (npMgr *NetworkPolicyManager) GetAppVersion() string {
-	return npMgr.version
+	return npMgr.Version
}

// Start starts shared informers and waits for the shared informer cache to sync.
func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan struct{}) error {
	if !config.Toggles.EnableV2NPM {
		// Do initialization of data plane before starting syncup of each controller to avoid heavy call to api-server
-		if err := npMgr.netPolControllerV1.ResetDataPlane(); err != nil {
+		if err := npMgr.NetPolControllerV1.ResetDataPlane(); err != nil {
			return fmt.Errorf("Failed to initialized data plane with err %w", err)
		}
	}

	// Starts all informers manufactured by npMgr's informerFactory.
-	npMgr.informerFactory.Start(stopCh)
+	npMgr.InformerFactory.Start(stopCh)

	// Wait for the initial sync of local cache.
-	if !cache.WaitForCacheSync(stopCh, npMgr.podInformer.Informer().HasSynced) {
-		return fmt.Errorf("Pod informer error: %w", ErrInformerSyncFailure)
+	if !cache.WaitForCacheSync(stopCh, npMgr.PodInformer.Informer().HasSynced) {
+		return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure)
	}

-	if !cache.WaitForCacheSync(stopCh, npMgr.nsInformer.Informer().HasSynced) {
-		return fmt.Errorf("Namespace informer error: %w", ErrInformerSyncFailure)
+	if !cache.WaitForCacheSync(stopCh, npMgr.NsInformer.Informer().HasSynced) {
+		return fmt.Errorf("Namespace informer error: %w", models.ErrInformerSyncFailure)
	}

-	if !cache.WaitForCacheSync(stopCh, npMgr.npInformer.Informer().HasSynced) {
-		return fmt.Errorf("NetworkPolicy informer error: %w", ErrInformerSyncFailure)
+	if !cache.WaitForCacheSync(stopCh, npMgr.NpInformer.Informer().HasSynced) {
+		return fmt.Errorf("NetworkPolicy informer error: %w", models.ErrInformerSyncFailure)
	}

	// start v2 NPM controllers after synced
	if config.Toggles.EnableV2NPM {
-		go npMgr.podControllerV2.Run(stopCh)
-		go npMgr.namespaceControllerV2.Run(stopCh)
-		go npMgr.netPolControllerV2.Run(stopCh)
+		go npMgr.PodControllerV2.Run(stopCh)
+		go npMgr.NamespaceControllerV2.Run(stopCh)
+		go npMgr.NetPolControllerV2.Run(stopCh)
		return nil
	}

	// start v1 NPM controllers after synced
-	go npMgr.podControllerV1.Run(stopCh)
-	go npMgr.namespaceControllerV1.Run(stopCh)
-	go npMgr.netPolControllerV1.Run(stopCh)
-	go npMgr.netPolControllerV1.RunPeriodicTasks(stopCh)
+	go npMgr.PodControllerV1.Run(stopCh)
+	go npMgr.NamespaceControllerV1.Run(stopCh)
+	go npMgr.NetPolControllerV1.Run(stopCh)
+	go npMgr.NetPolControllerV1.RunPeriodicTasks(stopCh)

	return nil
}
@ -176,8 +199,3 @@ func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan
func GetAIMetadata() string {
	return aiMetadata
}
-
-func GetNodeName() string {
-	nodeName := os.Getenv(EnvNodeName)
-	return nodeName
-}
@ -1,4 +1,6 @@
-package npm
+package models
+
+import "os"

const (
	heartbeatIntervalInMinutes = 30 //nolint:unused,deadcode,varcheck // ignore this error

@ -14,3 +16,8 @@ const (

	EnvNodeName = "HOSTNAME"
)
+
+func GetNodeName() string {
+	nodeName := os.Getenv(EnvNodeName)
+	return nodeName
+}
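`GetNodeName` works only because every manifest in this commit injects the node name into the `HOSTNAME` variable through the downward API (`fieldRef: spec.nodeName`). A minimal sketch of that contract, with a stand-in value for what the kubelet would supply:

```go
package main

import (
	"fmt"
	"os"
)

// EnvNodeName mirrors the constant now living in npm/pkg/models.
const EnvNodeName = "HOSTNAME"

func main() {
	// In-cluster, this value comes from the downward API; the string below is a stand-in.
	os.Setenv(EnvNodeName, "aks-nodepool1-12345678-vmss000000")
	fmt.Println(os.Getenv(EnvNodeName)) // what models.GetNodeName() would return
}
```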
@ -0,0 +1,53 @@
// Copyright 2018 Microsoft. All rights reserved.
// MIT License
package models

import (
	controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1"
	controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	networkinginformers "k8s.io/client-go/informers/networking/v1"
)

var (
	ErrMarshalNPMCache     = errors.New("failed to marshal NPM Cache")
	ErrInformerSyncFailure = errors.New("informer sync failure")
)

// CacheKey is the cache lookup key for the NPM cache
type CacheKey string

// K8SControllersV1 are the legacy k8s controllers
type K8SControllersV1 struct {
	PodControllerV1       *controllersv1.PodController           //nolint:structcheck //ignore this error
	NamespaceControllerV1 *controllersv1.NamespaceController     //nolint:structcheck // false lint error
	NpmNamespaceCacheV1   *controllersv1.NpmNamespaceCache       //nolint:structcheck // false lint error
	NetPolControllerV1    *controllersv1.NetworkPolicyController //nolint:structcheck // false lint error
}

// K8SControllersV2 are the optimized k8s controllers that replace the legacy controllers
type K8SControllersV2 struct {
	PodControllerV2       *controllersv2.PodController           //nolint:structcheck //ignore this error
	NamespaceControllerV2 *controllersv2.NamespaceController     //nolint:structcheck // false lint error
	NpmNamespaceCacheV2   *controllersv2.NpmNamespaceCache       //nolint:structcheck // false lint error
	NetPolControllerV2    *controllersv2.NetworkPolicyController //nolint:structcheck // false lint error
}

// Informers are the informers for the k8s controllers
type Informers struct {
	InformerFactory informers.SharedInformerFactory           //nolint:structcheck //ignore this error
	PodInformer     coreinformers.PodInformer                 //nolint:structcheck // false lint error
	NsInformer      coreinformers.NamespaceInformer           //nolint:structcheck // false lint error
	NpInformer      networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error
}

// AzureConfig captures the Azure specific configurations and fields
type AzureConfig struct {
	K8sServerVersion *version.Info
	NodeName         string
	Version          string
	TelemetryEnabled bool
}
@ -35,7 +35,9 @@ func NewEventsClient(ctx context.Context, pod, node, addr string) (*EventsClient
		return nil, ErrAddressNil
	}

+	klog.Infof("Connecting to NPM controller gRPC server at address %s\n", addr)
	// TODO Make this secure
+	// TODO Remove WithBlock option post testing
	cc, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		return nil, fmt.Errorf("failed to dial %s: %w", addr, err)
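Since `grpc.WithBlock()` makes `DialContext` synchronous, bounding the call with a context deadline keeps daemon startup from hanging indefinitely while the controller Service is still coming up. A sketch of that pattern (not the repo's code; the address is taken from the ConfigMaps above):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// WithBlock plus a deadline: fail fast with an error instead of blocking forever.
	cc, err := grpc.DialContext(ctx, "azure-npm.kube-system.svc.cluster.local:9001",
		grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer cc.Close()
}
```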
@ -134,8 +134,8 @@ func (m *EventsServer) start(stopCh <-chan struct{}) error {
}

func (m *EventsServer) handle() error {
-	klog.Info("Starting transport manager listener")
-	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", m.port))
+	klog.Infof("Starting transport manager listener on port %v", m.port)
+	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", m.port))
	if err != nil {
		return fmt.Errorf("failed to handle server connections: %w", err)
	}
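The listener change above matters for the decomposed mode: binding to `":port"` accepts connections on all interfaces, so remote daemons can reach the controlplane through the `azure-npm` Service, whereas the old `"localhost:port"` only admitted in-pod callers. A standalone sketch:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// ":10092" binds every interface; "localhost:10092" would bind loopback only.
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 10092))
	if err != nil {
		panic(err)
	}
	defer lis.Close()
	fmt.Println("listening on", lis.Addr())
}
```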
npm/types.go
@ -1,78 +0,0 @@
// Copyright 2018 Microsoft. All rights reserved.
// MIT License
package npm

import (
	npmconfig "github.com/Azure/azure-container-networking/npm/config"
	"github.com/Azure/azure-container-networking/npm/ipsm"
	controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1"
	controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	networkinginformers "k8s.io/client-go/informers/networking/v1"
)

var (
	aiMetadata         string
	errMarshalNPMCache = errors.New("failed to marshal NPM Cache")
)

// NetworkPolicyManager contains informers for pod, namespace and networkpolicy.
type NetworkPolicyManager struct {
	config npmconfig.Config

	// ipsMgr are shared in all controllers. Thus, only one ipsMgr is created for simple management
	// and uses lock to avoid unintentional race conditions in IpsetManager.
	ipsMgr *ipsm.IpsetManager

	// Informers are the Kubernetes Informer
	// https://pkg.go.dev/k8s.io/client-go/informers
	Informers

	// Legacy controllers for handling Kubernetes resource watcher events
	// To be deprecated
	K8SControllersV1

	// Controllers for handling Kubernetes resource watcher events
	K8SControllersV2

	// Azure-specific variables
	AzureConfig
}

// CacheKey is the cache lookup key for the NPM cache
type CacheKey string

// K8SControllersV1 are the legacy k8s controllers
type K8SControllersV1 struct {
	podControllerV1       *controllersv1.PodController           //nolint:structcheck //ignore this error
	namespaceControllerV1 *controllersv1.NamespaceController     //nolint:structcheck // false lint error
	npmNamespaceCacheV1   *controllersv1.NpmNamespaceCache       //nolint:structcheck // false lint error
	netPolControllerV1    *controllersv1.NetworkPolicyController //nolint:structcheck // false lint error
}

// K8SControllersV2 are the optimized k8s controllers that replace the legacy controllers
type K8SControllersV2 struct {
	podControllerV2       *controllersv2.PodController           //nolint:structcheck //ignore this error
	namespaceControllerV2 *controllersv2.NamespaceController     //nolint:structcheck // false lint error
	npmNamespaceCacheV2   *controllersv2.NpmNamespaceCache       //nolint:structcheck // false lint error
	netPolControllerV2    *controllersv2.NetworkPolicyController //nolint:structcheck // false lint error
}

// Informers are the informers for the k8s controllers
type Informers struct {
	informerFactory informers.SharedInformerFactory           //nolint:structcheck //ignore this error
	podInformer     coreinformers.PodInformer                 //nolint:structcheck // false lint error
	nsInformer      coreinformers.NamespaceInformer           //nolint:structcheck // false lint error
	npInformer      networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error
}

// AzureConfig captures the Azure specific configurations and fields
type AzureConfig struct {
	k8sServerVersion *version.Info
	NodeName         string
	version          string
	TelemetryEnabled bool
}