fix: move CNS to distroless-iptables base image (#2661)

* fix: move cns to distroless base image with iptables

Signed-off-by: Evan Baker <rbtr@users.noreply.github.com>

* fix: add debug container to CNS Pod for tests

Signed-off-by: GitHub <noreply@github.com>

---------

Signed-off-by: Evan Baker <rbtr@users.noreply.github.com>
Signed-off-by: GitHub <noreply@github.com>
Evan Baker authored 2024-06-14 10:04:02 -07:00; committed by GitHub
Parent: 49f4c56881
Commit: 7d2a7d7b97
No key found matching this signature
GPG key ID: B5690EEEBB952194
11 changed files: 235 additions and 64 deletions

View file

@@ -205,7 +205,7 @@ stages:
cd hack/scripts
chmod +x async-delete-test.sh
./async-delete-test.sh
if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then
if ! [ -z $(kubectl -n kube-system get ds azure-cns | grep non-existing) ]; then
kubectl -n kube-system patch daemonset azure-cns --type json -p='[{"op": "remove", "path": "/spec/template/spec/nodeSelector/non-existing"}]'
fi
name: "testAsyncDelete"

View file

@@ -1,4 +1,13 @@
FROM mcr.microsoft.com/oss/go/microsoft/golang:1.21 AS builder
# mcr.microsoft.com/oss/go/microsoft/golang:1.22.3-1-cbl-mariner2.0
FROM mcr.microsoft.com/oss/go/microsoft/golang@sha256:8253def0216b87b2994b7ad689aeec7440f6eb67f981e438071d8d67e36ff69f as golang
# mcr.microsoft.com/cbl-mariner/base/core:2.0
FROM mcr.microsoft.com/cbl-mariner/base/core@sha256:77651116f2e83cf50fddd8a0316945499f8ce6521ff8e94e67539180d1e5975a as mariner-core
# mcr.microsoft.com/cbl-mariner/distroless/minimal:2.0
FROM mcr.microsoft.com/cbl-mariner/distroless/minimal@sha256:63a0a70ceaa1320bc6eb98b81106667d43e46b674731ea8d28e4de1b87e0747f as mariner-distroless
FROM golang AS builder
ARG VERSION
ARG CNS_AI_PATH
ARG CNS_AI_ID
@@ -7,13 +16,13 @@ COPY . .
RUN CGO_ENABLED=0 go build -a -o /usr/local/bin/azure-cns -ldflags "-X main.version="$VERSION" -X "$CNS_AI_PATH"="$CNS_AI_ID"" -gcflags="-dwarflocationlists=true" cns/service/*.go
RUN CGO_ENABLED=0 go build -a -o /usr/local/bin/azure-vnet-telemetry -ldflags "-X main.version="$VERSION"" -gcflags="-dwarflocationlists=true" cni/telemetry/service/*.go
FROM mcr.microsoft.com/cbl-mariner/base/core:2.0
RUN tdnf upgrade -y && tdnf install -y ca-certificates iptables
COPY --from=builder /etc/passwd /etc/passwd
COPY --from=builder /etc/group /etc/group
FROM mariner-core as iptables
RUN tdnf install -y iptables
FROM mariner-distroless
COPY --from=iptables /usr/sbin/*tables* /usr/sbin/
COPY --from=iptables /usr/lib /usr/lib
COPY --from=builder /usr/local/bin/azure-cns \
/usr/local/bin/azure-cns
COPY --from=builder /usr/local/bin/azure-vnet-telemetry \
/usr/local/bin/azure-vnet-telemetry
ENTRYPOINT [ "/usr/local/bin/azure-cns" ]
EXPOSE 10090
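
The final image is distroless, so it carries no shell or package manager; iptables and its shared libraries are copied in from the mariner-core build stage instead of being installed at runtime with tdnf. A quick local smoke test might look like this (a sketch only; the Dockerfile path and tag are illustrative, not taken from CI):

# Build the CNS image and confirm the copied iptables binary resolves
# its shared libraries inside the distroless root filesystem.
docker build -t azure-cns:distroless-test -f cns/Dockerfile .
docker run --rm --entrypoint /usr/sbin/iptables azure-cns:distroless-test --version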

View file

@@ -24,11 +24,11 @@ do
echo "check directory for pending delete"
cns_pod=$(kubectl get pods -l k8s-app=azure-cns -n kube-system -o wide | grep "$node_name" | awk '{print $1}')
file=$(kubectl exec -it $cns_pod -n kube-system -- ls var/run/azure-vnet/deleteIDs)
file=$(kubectl exec -it $cns_pod -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs)
if [ -z $file ]; then
while [ -z $file ];
do
file=$(kubectl exec -i $cns_pod -n kube-system -- ls var/run/azure-vnet/deleteIDs)
file=$(kubectl exec -i $cns_pod -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs)
done
fi
echo "pending deletes"
@@ -37,7 +37,7 @@ do
echo "wait 30s for filesystem delete to occur"
sleep 30s
echo "check directory is now empty"
check_directory=$(kubectl exec -i $cns_pod -n kube-system -- ls var/run/azure-vnet/deleteIDs)
check_directory=$(kubectl exec -i $cns_pod -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs)
if [ -z $check_directory ]; then
echo "async delete success"
break
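
The retry loop above re-execs immediately when the file is missing; a gentler variant (a sketch, not part of the script) sleeps between polls:

# Poll the debug sidecar for the pending-delete file, backing off
# between attempts instead of hot-looping (interval is arbitrary).
until file=$(kubectl exec -i "$cns_pod" -c debug -n kube-system -- ls var/run/azure-vnet/deleteIDs) && [ -n "$file" ]; do
  sleep 2
done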

View file

@@ -83,6 +83,33 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: debug
image: mcr.microsoft.com/cbl-mariner/base/core:2.0
imagePullPolicy: IfNotPresent
command: ["sleep", "3600"]
securityContext:
capabilities:
add:
- NET_ADMIN
volumeMounts:
- name: log
mountPath: /var/log
- name: cns-state
mountPath: /var/lib/azure-network
- name: azure-endpoints
mountPath: /var/run/azure-cns/
- name: cns-config
mountPath: /etc/azure-cns
- name: cni-bin
mountPath: /opt/cni/bin
- name: azure-vnet
mountPath: /var/run/azure-vnet
- name: legacy-cni-state
mountPath: /var/run/azure-vnet.json
- name: xtables-lock
mountPath: /run/xtables.lock
- name: cni-conflist
mountPath: /etc/cni/net.d
initContainers:
- name: cni-installer
image: acnpublic.azurecr.io/cni-dropgz:latest
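
The debug sidecar exists because the CNS container is now distroless and has no shell for kubectl exec to run; the tests exec into this mariner-core container, which shares the CNS volume mounts. A minimal sketch of the pattern the updated scripts rely on:

# Exec into the debug sidecar rather than the shell-less azure-cns
# container; the selector and state path come from the test scripts.
cns_pod=$(kubectl -n kube-system get pods -l k8s-app=azure-cns -o jsonpath='{.items[0].metadata.name}')
kubectl -n kube-system exec "$cns_pod" -c debug -- ls /var/run/azure-vnet/deleteIDs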

View file

@@ -154,7 +154,7 @@ func TestSwiftv2PodToPod(t *testing.T) {
for _, pod := range allPods.Items {
for _, ip := range ipsToPing {
t.Logf("ping from pod %q to %q", pod.Name, ip)
result := podTest(t, ctx, clientset, pod, []string{"ping", "-c", "3", ip}, restConfig)
result := podTest(t, ctx, clientset, pod, "", []string{"ping", "-c", "3", ip}, restConfig)
if result != nil {
t.Errorf("ping %q failed: error: %s", ip, result)
}
@@ -163,8 +163,8 @@ func TestSwiftv2PodToPod(t *testing.T) {
return
}
func podTest(t *testing.T, ctx context.Context, clientset *kuberneteslib.Clientset, srcPod v1.Pod, cmd []string, rc *restclient.Config) error {
output, err := kubernetes.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
func podTest(t *testing.T, ctx context.Context, clientset *kuberneteslib.Clientset, srcPod v1.Pod, container string, cmd []string, rc *restclient.Config) error {
output, err := kubernetes.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, container, cmd, rc)
t.Logf(string(output))
if err != nil {
t.Errorf("failed to execute command on pod: %v", srcPod.Name)

View file

@@ -19,7 +19,7 @@ var ipv6PrefixPolicy = []string{"powershell", "-c", "curl.exe", "-6", "-v", "www
func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error {
logrus.Infof("podTest() - %v %v", srcPod.Name, cmd)
output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, "", cmd, rc)
if err != nil {
return errors.Wrapf(err, "failed to execute command on pod: %v", srcPod.Name)
}

View file

@@ -427,7 +427,7 @@ func writeToFile(dir, fileName, str string) error {
return errors.Wrap(err, "failed to write string")
}
func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string, cmd []string, config *rest.Config) ([]byte, error) {
func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName, containerName string, cmd []string, config *rest.Config) ([]byte, error) {
var result []byte
execCmdOnPod := func() error {
req := clientset.CoreV1().RESTClient().Post().
@@ -436,11 +436,12 @@ func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespac
Namespace(namespace).
SubResource("exec").
VersionedParams(&corev1.PodExecOptions{
Command: cmd,
Stdin: false,
Stdout: true,
Stderr: true,
TTY: false,
Command: cmd,
Container: containerName,
Stdin: false,
Stdout: true,
Stderr: true,
TTY: false,
}, scheme.ParameterCodec)
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
@@ -582,7 +583,7 @@ func RestartKubeProxyService(ctx context.Context, clientset *kubernetes.Clientse
}
privilegedPod := pod.Items[0]
// exec into the pod and restart kubeproxy
_, err = ExecCmdOnPod(ctx, clientset, privilegedNamespace, privilegedPod.Name, restartKubeProxyCmd, config)
_, err = ExecCmdOnPod(ctx, clientset, privilegedNamespace, privilegedPod.Name, "", restartKubeProxyCmd, config)
if err != nil {
return errors.Wrapf(err, "failed to exec into privileged pod %s on node %s", privilegedPod.Name, node.Name)
}
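
Passing an empty containerName leaves PodExecOptions.Container unset, preserving the old single-container behavior, while CNS callers now name the debug sidecar explicitly. The kubectl equivalent, as a rough sketch ($CNS_POD stands for any azure-cns pod name):

# Without -c, kubectl defaults to the pod's first container — the
# distroless azure-cns — where ls does not exist and the exec fails.
kubectl -n kube-system exec "$CNS_POD" -- ls /var/run/azure-vnet
# Naming the sidecar added for the tests succeeds.
kubectl -n kube-system exec "$CNS_POD" -c debug -- ls /var/run/azure-vnet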

View file

@@ -77,7 +77,7 @@ func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetIn
MustDeleteDaemonset(ctx, daemonsets, ds)
log.Printf("Creating Daemonset %v", ds.Name)
if _, err := daemonsets.Create(ctx, &ds, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create daemonset"))
log.Fatal(errors.Wrap(err, "failed to create daemonset"))
}
}
@@ -85,79 +85,79 @@ func MustCreateDeployment(ctx context.Context, deployments typedappsv1.Deploymen
MustDeleteDeployment(ctx, deployments, d)
log.Printf("Creating Deployment %v", d.Name)
if _, err := deployments.Create(ctx, &d, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create deployment"))
log.Fatal(errors.Wrap(err, "failed to create deployment"))
}
}
func mustCreateServiceAccount(ctx context.Context, svcAccounts typedcorev1.ServiceAccountInterface, s corev1.ServiceAccount) {
if err := svcAccounts.Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete svc account"))
log.Fatal(errors.Wrap(err, "failed to delete svc account"))
}
}
log.Printf("Creating ServiceAccount %v", s.Name)
if _, err := svcAccounts.Create(ctx, &s, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create svc account"))
log.Fatal(errors.Wrap(err, "failed to create svc account"))
}
}
func mustCreateClusterRole(ctx context.Context, clusterRoles typedrbacv1.ClusterRoleInterface, cr rbacv1.ClusterRole) {
if err := clusterRoles.Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete cluster role"))
log.Fatal(errors.Wrap(err, "failed to delete cluster role"))
}
}
log.Printf("Creating ClusterRoles %v", cr.Name)
if _, err := clusterRoles.Create(ctx, &cr, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create cluster role"))
log.Fatal(errors.Wrap(err, "failed to create cluster role"))
}
}
func mustCreateClusterRoleBinding(ctx context.Context, crBindings typedrbacv1.ClusterRoleBindingInterface, crb rbacv1.ClusterRoleBinding) {
if err := crBindings.Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete cluster role binding"))
log.Fatal(errors.Wrap(err, "failed to delete cluster role binding"))
}
}
log.Printf("Creating RoleBinding %v", crb.Name)
if _, err := crBindings.Create(ctx, &crb, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create role binding"))
log.Fatal(errors.Wrap(err, "failed to create role binding"))
}
}
func mustCreateRole(ctx context.Context, rs typedrbacv1.RoleInterface, r rbacv1.Role) {
if err := rs.Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete role"))
log.Fatal(errors.Wrap(err, "failed to delete role"))
}
}
log.Printf("Creating Role %v", r.Name)
if _, err := rs.Create(ctx, &r, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create role"))
log.Fatal(errors.Wrap(err, "failed to create role"))
}
}
func mustCreateRoleBinding(ctx context.Context, rbi typedrbacv1.RoleBindingInterface, rb rbacv1.RoleBinding) {
if err := rbi.Delete(ctx, rb.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete role binding"))
log.Fatal(errors.Wrap(err, "failed to delete role binding"))
}
}
log.Printf("Creating RoleBinding %v", rb.Name)
if _, err := rbi.Create(ctx, &rb, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create role binding"))
log.Fatal(errors.Wrap(err, "failed to create role binding"))
}
}
func mustCreateConfigMap(ctx context.Context, cmi typedcorev1.ConfigMapInterface, cm corev1.ConfigMap) {
if err := cmi.Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil {
if !apierrors.IsNotFound(err) {
panic(errors.Wrap(err, "failed to delete configmap"))
log.Fatal(errors.Wrap(err, "failed to delete configmap"))
}
}
log.Printf("Creating ConfigMap %v", cm.Name)
if _, err := cmi.Create(ctx, &cm, metav1.CreateOptions{}); err != nil {
panic(errors.Wrap(err, "failed to create configmap"))
log.Fatal(errors.Wrap(err, "failed to create configmap"))
}
}
@@ -177,7 +177,7 @@ func MustScaleDeployment(ctx context.Context,
log.Printf("Waiting for pods to be ready..")
err := WaitForPodDeployment(ctx, clientset, namespace, deployment.Name, podLabelSelector, replicas)
if err != nil {
panic(errors.Wrap(err, "failed to wait for pod deployment"))
log.Fatal(errors.Wrap(err, "failed to wait for pod deployment"))
}
}
}
@@ -189,7 +189,7 @@ func MustCreateNamespace(ctx context.Context, clienset *kubernetes.Clientset, na
},
}, metav1.CreateOptions{})
if err != nil {
panic(errors.Wrapf(err, "failed to create namespace %v", namespace))
log.Fatal(errors.Wrapf(err, "failed to create namespace %v", namespace))
}
}
@@ -614,6 +614,15 @@ func hostPathTypePtr(h corev1.HostPathType) *corev1.HostPathType {
func volumesForAzureCNIOverlayLinux() []corev1.Volume {
return []corev1.Volume{
{
Name: "azure-endpoints",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var/run/azure-cns/",
Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate),
},
},
},
{
Name: "log",
VolumeSource: corev1.VolumeSource{
@@ -687,6 +696,15 @@ func volumesForAzureCNIOverlayLinux() []corev1.Volume {
},
},
},
{
Name: "xtables-lock",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/run/xtables.lock",
Type: hostPathTypePtr(corev1.HostPathFile),
},
},
},
}
}
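
Note that the xtables-lock volume uses HostPathFile, which requires /run/xtables.lock to already exist on the node (unlike HostPathFileOrCreate). A hedged pre-check, not part of the test suite:

# HostPathFile mounts fail when the path is absent, so ensure the
# lock file exists on the node before the pod schedules.
ls -l /run/xtables.lock || sudo touch /run/xtables.lock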

View file

@@ -12,7 +12,7 @@ import (
)
const (
cnsLabelSelector = "k8s-app=azure-cns"
validatorPod = "k8s-app=azure-cns"
ciliumLabelSelector = "k8s-app=cilium"
overlayClusterLabelName = "overlay"
)
@@ -30,26 +30,104 @@ type stateFileIpsFunc func([]byte) (map[string]string, error)
var linuxChecksMap = map[string][]check{
"cilium": {
{"cns", cnsManagedStateFileIps, cnsLabelSelector, privilegedNamespace, cnsManagedStateFileCmd}, // cns configmap "ManageEndpointState": true, | Endpoints managed in CNS State File
{"cilium", ciliumStateFileIps, ciliumLabelSelector, privilegedNamespace, ciliumStateFileCmd},
{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsCachedAssignedIPStateCmd},
{
name: "cns",
stateFileIPs: cnsManagedStateFileIps,
podLabelSelector: validatorPod,
podNamespace: privilegedNamespace,
containerName: "debug",
cmd: cnsManagedStateFileCmd,
}, // cns configmap "ManageEndpointState": true, | Endpoints managed in CNS State File
{
name: "cilium",
stateFileIPs: ciliumStateFileIps,
podLabelSelector: ciliumLabelSelector,
podNamespace: privilegedNamespace,
cmd: ciliumStateFileCmd,
},
{
name: "cns cache",
stateFileIPs: cnsCacheStateFileIps,
podLabelSelector: validatorPod,
podNamespace: privilegedNamespace,
containerName: "debug",
cmd: cnsCachedAssignedIPStateCmd,
},
},
"cniv1": {
{"azure-vnet", azureVnetStateIps, privilegedLabelSelector, privilegedNamespace, azureVnetStateFileCmd},
{"azure-vnet-ipam", azureVnetIpamStateIps, privilegedLabelSelector, privilegedNamespace, azureVnetIpamStateCmd},
{
name: "azure-vnet",
stateFileIPs: azureVnetStateIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetStateFileCmd,
},
{
name: "azure-vnet-ipam",
stateFileIPs: azureVnetIpamStateIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetIpamStateCmd,
},
},
"cniv2": {
{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsCachedAssignedIPStateCmd},
{"azure-vnet", azureVnetStateIps, privilegedLabelSelector, privilegedNamespace, azureVnetStateFileCmd}, // cns configmap "ManageEndpointState": false, | Endpoints managed in CNI State File
{
name: "cns cache",
stateFileIPs: cnsCacheStateFileIps,
podLabelSelector: validatorPod,
podNamespace: privilegedNamespace,
containerName: "debug",
cmd: cnsCachedAssignedIPStateCmd,
},
{
name: "azure-vnet",
stateFileIPs: azureVnetStateIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetStateFileCmd,
}, // cns configmap "ManageEndpointState": false, | Endpoints managed in CNI State File
},
"dualstack": {
{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsCachedAssignedIPStateCmd},
{"azure dualstackoverlay", azureVnetStateIps, privilegedLabelSelector, privilegedNamespace, azureVnetStateFileCmd},
{
name: "cns cache",
stateFileIPs: cnsCacheStateFileIps,
podLabelSelector: validatorPod,
podNamespace: privilegedNamespace,
containerName: "debug",
cmd: cnsCachedAssignedIPStateCmd,
},
{
name: "azure dualstackoverlay",
stateFileIPs: azureVnetStateIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetStateFileCmd,
},
},
"cilium_dualstack": {
{"cns dualstack", cnsManagedStateFileDualStackIps, cnsLabelSelector, privilegedNamespace, cnsManagedStateFileCmd}, // cns configmap "ManageEndpointState": true, | Endpoints managed in CNS State File
{"cilium", ciliumStateFileDualStackIps, ciliumLabelSelector, privilegedNamespace, ciliumStateFileCmd},
{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsCachedAssignedIPStateCmd},
{
name: "cns dualstack",
stateFileIPs: cnsManagedStateFileDualStackIps,
podLabelSelector: validatorPod,
podNamespace: privilegedNamespace,
containerName: "debug",
cmd: cnsManagedStateFileCmd,
}, // cns configmap "ManageEndpointState": true, | Endpoints managed in CNS State File
{
name: "cilium",
stateFileIPs: ciliumStateFileDualStackIps,
podLabelSelector: ciliumLabelSelector,
podNamespace: privilegedNamespace,
cmd: ciliumStateFileCmd,
},
{
name: "cns cache",
stateFileIPs: cnsCacheStateFileIps,
podLabelSelector: validatorPod,
podNamespace: privilegedNamespace,
containerName: "debug",
cmd: cnsCachedAssignedIPStateCmd,
},
},
}
@@ -266,7 +344,7 @@ func (v *Validator) validateRestartNetwork(ctx context.Context) error {
}
privilegedPod := pod.Items[0]
// exec into the pod to get the state file
_, err = acnk8s.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privilegedPod.Name, restartNetworkCmd, v.config)
_, err = acnk8s.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privilegedPod.Name, "", restartNetworkCmd, v.config)
if err != nil {
return errors.Wrapf(err, "failed to exec into privileged pod %s on node %s", privilegedPod.Name, node.Name)
}

View file

@@ -52,9 +52,10 @@ type Validator struct {
type check struct {
name string
stateFileIps func([]byte) (map[string]string, error)
stateFileIPs func([]byte) (map[string]string, error)
podLabelSelector string
podNamespace string
containerName string
cmd []string
}
@@ -114,7 +115,7 @@ func (v *Validator) Validate(ctx context.Context) error {
func (v *Validator) ValidateStateFile(ctx context.Context) error {
for _, check := range v.checks {
err := v.validateIPs(ctx, check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector)
err := v.validateIPs(ctx, check.stateFileIPs, check.cmd, check.name, check.podNamespace, check.podLabelSelector, check.containerName)
if err != nil {
return err
}
@@ -122,7 +123,7 @@ func (v *Validator) ValidateStateFile(ctx context.Context) error {
return nil
}
func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector, containerName string) error {
log.Printf("Validating %s state file", checkType)
nodes, err := acnk8s.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os])
if err != nil {
@@ -140,7 +141,8 @@ func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFu
}
podName := pod.Items[0].Name
// exec into the pod to get the state file
result, err := acnk8s.ExecCmdOnPod(ctx, v.clientset, namespace, podName, cmd, v.config)
log.Printf("Executing command %s on pod %s, container %s", cmd, podName, containerName)
result, err := acnk8s.ExecCmdOnPod(ctx, v.clientset, namespace, podName, containerName, cmd, v.config)
if err != nil {
return errors.Wrapf(err, "failed to exec into privileged pod - %s", podName)
}

View file

@@ -33,14 +33,50 @@ var (
var windowsChecksMap = map[string][]check{
"cniv1": {
{"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPointCmd},
{"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd},
{"azure-vnet-ipam", azureVnetIpamIps, privilegedLabelSelector, privilegedNamespace, azureVnetIpamCmd},
{
name: "hns",
stateFileIPs: hnsStateFileIPs,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: hnsEndPointCmd,
},
{
name: "azure-vnet",
stateFileIPs: azureVnetIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetCmd,
},
{
name: "azure-vnet-ipam",
stateFileIPs: azureVnetIpamIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetIpamCmd,
},
},
"cniv2": {
{"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPointCmd},
{"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd},
{"cns cache", cnsCacheStateFileIps, cnsWinLabelSelector, privilegedNamespace, cnsWinCachedAssignedIPStateCmd},
{
name: "hns",
stateFileIPs: hnsStateFileIPs,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: hnsEndPointCmd,
},
{
name: "azure-vnet",
stateFileIPs: azureVnetIps,
podLabelSelector: privilegedLabelSelector,
podNamespace: privilegedNamespace,
cmd: azureVnetCmd,
},
{
name: "cns cache",
stateFileIPs: cnsCacheStateFileIps,
podLabelSelector: cnsWinLabelSelector,
podNamespace: privilegedNamespace,
cmd: cnsWinCachedAssignedIPStateCmd,
},
},
}
@@ -101,7 +137,7 @@ type AddressRecord struct {
InUse bool
}
func hnsStateFileIps(result []byte) (map[string]string, error) {
func hnsStateFileIPs(result []byte) (map[string]string, error) {
jsonType := bytes.TrimLeft(result, " \t\r\n")
isObject := jsonType[0] == '{'
isArray := jsonType[0] == '['
@@ -209,7 +245,7 @@ func validateHNSNetworkState(ctx context.Context, nodes *corev1.NodeList, client
}
podName := pod.Items[0].Name
// exec into the pod to get the state file
result, err := acnk8s.ExecCmdOnPod(ctx, clientset, privilegedNamespace, podName, hnsNetworkCmd, restConfig)
result, err := acnk8s.ExecCmdOnPod(ctx, clientset, privilegedNamespace, podName, "", hnsNetworkCmd, restConfig)
if err != nil {
return errors.Wrap(err, "failed to exec into privileged pod")
}