test: pipeline changes for cilium nodesubnet (#3083)
* chore: separate pipeline changes for cilium nodesubnet
* fix: make linter happy
* chore: remove node subnet cilium version tag
* fix: make private fork version tags compatible
Parent: 1c1bbaa924
Commit: b5a7f14ecd
@@ -261,6 +261,17 @@ stages:
           k8sVersion: ""
           dependsOn: "containerize"
+
+      # Cilium Nodesubnet E2E tests
+      - template: singletenancy/cilium-nodesubnet/cilium-nodesubnet-e2e-job-template.yaml
+        parameters:
+          name: "cilium_nodesubnet_e2e"
+          displayName: Cilium NodeSubnet
+          clusterType: nodesubnet-byocni-nokubeproxy-up
+          clusterName: "cilndsubnete2e"
+          vmSize: Standard_B2s
+          k8sVersion: ""
+          dependsOn: "containerize"
 
       # Cilium Overlay E2E tests
       - template: singletenancy/cilium-overlay/cilium-overlay-e2e-job-template.yaml
         parameters:
@@ -405,6 +416,7 @@ stages:
       - azure_overlay_stateless_e2e
       - aks_swift_e2e
       - cilium_e2e
+      - cilium_nodesubnet_e2e
       - cilium_overlay_e2e
       - cilium_h_overlay_e2e
       - aks_ubuntu_22_linux_e2e
@@ -426,6 +438,10 @@ stages:
             name: cilium_e2e
             clusterName: "ciliume2e"
             region: $(REGION_AKS_CLUSTER_TEST)
+          cilium_nodesubnet_e2e:
+            name: cilium_nodesubnet_e2e
+            clusterName: "cilndsubnete2e"
+            region: $(REGION_AKS_CLUSTER_TEST)
           cilium_overlay_e2e:
             name: cilium_overlay_e2e
             clusterName: "cilovere2e"
@@ -71,9 +71,9 @@ stages:
           os: ${{ parameters.os }}
           datapath: true
           dns: true
           cni: cilium
           portforward: true
           service: true
           hostport: true
           dependsOn: ${{ parameters.name }}
 
   - job: failedE2ELogs
@@ -58,7 +58,7 @@ steps:
       kubectl cluster-info
       kubectl get po -owide -A
       echo "install Cilium ${CILIUM_VERSION_TAG}"
-      export DIR=${CILIUM_VERSION_TAG%.*}
+      export DIR=$(echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2)
       echo "installing files from ${DIR}"
       echo "deploy Cilium ConfigMap"
       kubectl apply -f test/integration/manifests/cilium/v${DIR}/cilium-config/cilium-config.yaml
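This `DIR` change is presumably what the "make private fork version tags compatible" bullet refers to: `%.*` only strips the last dot-separated field (and keeps any `v` prefix), while the new expression always reduces the tag to `major.minor` with the `v` stripped, which is what the `v${DIR}` manifest path expects. A minimal sketch of the difference; the tag values below are hypothetical, chosen only to illustrate the two expansions:

    # illustration only -- the real fork tag format is not shown in this diff
    CILIUM_VERSION_TAG=1.14.4
    echo ${CILIUM_VERSION_TAG%.*}                    # old expression: 1.14
    echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2     # new expression: 1.14

    CILIUM_VERSION_TAG=v1.14.4-private.2
    echo ${CILIUM_VERSION_TAG%.*}                    # old expression: v1.14.4-private (not a valid manifest dir)
    echo ${CILIUM_VERSION_TAG#v} | cut -d. -f1,2     # new expression: 1.14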
@@ -19,6 +19,7 @@ OS_SKU_WIN ?= Windows2022
 REGION ?= westus2
 VM_SIZE ?= Standard_B2s
 VM_SIZE_WIN ?= Standard_B2s
+KUBE_PROXY_JSON_PATH ?= ./kube-proxy.json
 
 # overrideable variables
 SUB ?= $(AZURE_SUBSCRIPTION)
@@ -103,13 +104,13 @@ nodesubnet-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an NodeSubne
        --kubernetes-version $(K8S_VER) \
        --node-count $(NODE_COUNT) \
        --node-vm-size $(VM_SIZE) \
-       --load-balancer-sku basic \
+       --load-balancer-sku standard \
        --max-pods 250 \
        --network-plugin none \
        --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
        --os-sku $(OS_SKU) \
        --no-ssh-key \
-       --kube-proxy-config ./kube-proxy.json \
+       --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \
        --yes
    @$(MAKE) set-kubeconf
 
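With `KUBE_PROXY_JSON_PATH ?= ./kube-proxy.json`, the kube-proxy config path becomes a caller-overridable variable instead of a literal repeated in each target. A minimal usage sketch; the target and variable names come from the diff, while any other variables the target needs (subscription, resource group, cluster name, etc.) are omitted here for brevity:

    # default: uses ./kube-proxy.json next to the Makefile, same as before
    make nodesubnet-byocni-nokubeproxy-up

    # the pipeline (or a local run) can point at its own copy instead
    make nodesubnet-byocni-nokubeproxy-up KUBE_PROXY_JSON_PATH=/path/to/kube-proxy.json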
@@ -146,7 +147,7 @@ overlay-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up an Overlay BYO
        --pod-cidr 192.168.0.0/16 \
        --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \
        --no-ssh-key \
-       --kube-proxy-config ./kube-proxy.json \
+       --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \
        --yes
    @$(MAKE) set-kubeconf
 
@@ -215,7 +216,7 @@ swift-byocni-nokubeproxy-up: rg-up swift-net-up ## Bring up a SWIFT BYO CNI clus
        --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
        --no-ssh-key \
        --os-sku $(OS_SKU) \
-       --kube-proxy-config ./kube-proxy.json \
+       --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \
        --yes
    @$(MAKE) set-kubeconf
 
@@ -305,7 +306,7 @@ vnetscale-swift-byocni-nokubeproxy-up: rg-up vnetscale-swift-net-up ## Bring up
        --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \
        --no-ssh-key \
        --os-sku $(OS_SKU) \
-       --kube-proxy-config ./kube-proxy.json \
+       --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \
        --yes
    @$(MAKE) set-kubeconf
 
@@ -434,7 +435,7 @@ dualstack-byocni-nokubeproxy-up: rg-up overlay-net-up ## Brings up a Dualstack o
        --ip-families ipv4,ipv6 \
        --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \
        --no-ssh-key \
-       --kube-proxy-config ./kube-proxy.json \
+       --kube-proxy-config $(KUBE_PROXY_JSON_PATH) \
        --yes
    @$(MAKE) set-kubeconf
 
@@ -24,6 +24,7 @@ AKS Clusters
   byocni-up                          Alias to swift-byocni-up
   cilium-up                          Alias to swift-cilium-up
   up                                 Alias to swift-up
+  nodesubnet-byocni-nokubeproxy-up   Bring up a Nodesubnet BYO CNI cluster. Does not include secondary IP configs.
   overlay-byocni-up                  Bring up a Overlay BYO CNI cluster
   overlay-byocni-nokubeproxy-up      Bring up a Overlay BYO CNI cluster without kube-proxy
   overlay-cilium-up                  Bring up a Overlay Cilium cluster
@@ -12,14 +12,15 @@ import (
     "github.com/pkg/errors"
 )
 
-func runCommand(command string) (string, error) {
-    cmd := exec.Command("bash", "-c", command)
+func runAzCommand(params ...string) (string, error) {
     var out bytes.Buffer
     var stderr bytes.Buffer
-    cmd.Stdout = &out
-    cmd.Stderr = &stderr
     var err error
+    fmt.Println("Running Azure CLI command ", strings.Join(params, " "))
     for i := 0; i < 3; i++ {
+        cmd := exec.Command("az", params...)
+        cmd.Stdout = &out
+        cmd.Stderr = &stderr
         err = cmd.Run()
         if err == nil {
             break
@@ -27,7 +28,7 @@ func runCommand(command string) (string, error) {
     }
 
     if err != nil {
-        return "", errors.Wrap(err, "command failed")
+        return "", errors.Wrap(err, "command failed "+stderr.String())
     }
 
     return out.String(), nil
@@ -60,16 +61,14 @@ func main() {
         os.Exit(1)
     }
 
-    command := fmt.Sprintf("az vmss list -g %s --query '[0].name' -o tsv", resourceGroup)
-    result, err := runCommand(command)
+    result, err := runAzCommand("vmss", "list", "-g", resourceGroup, "--query", "[0].name", "-o", "tsv")
     if err != nil {
         fmt.Printf("Command failed with error: %s\n", err)
         os.Exit(1)
     }
     vmssName := strings.TrimSpace(result)
 
-    command = fmt.Sprintf("az vmss show -g %s -n %s", resourceGroup, vmssName)
-    result, err = runCommand(command)
+    result, err = runAzCommand("vmss", "show", "-g", resourceGroup, "-n", vmssName)
     if err != nil {
         fmt.Printf("Command failed with error: %s\n", err)
         os.Exit(1)
@@ -104,8 +103,14 @@ func main() {
     for i := 2; i <= secondaryConfigCount+1; i++ {
         ipConfig := make(map[string]interface{})
         for k, v := range primaryIPConfig {
+            // only the primary config needs loadBalancerBackendAddressPools. Azure doesn't allow
+            // secondary IP configs to be associated load balancer backend pools.
+            if k == "loadBalancerBackendAddressPools" {
+                continue
+            }
             ipConfig[k] = v
         }
 
         ipConfigName := fmt.Sprintf("ipconfig%d", i)
         if !contains(usedIPConfigNames, ipConfigName) {
             ipConfig["name"] = ipConfigName
@@ -125,20 +130,13 @@ func main() {
         os.Exit(1)
     }
 
-    escapedNetworkProfileJSON := strings.ReplaceAll(string(networkProfileJSON), `\`, `\\`)
-    escapedNetworkProfileJSON = strings.ReplaceAll(escapedNetworkProfileJSON, `'`, `\'`)
-
-    command = fmt.Sprintf("az vmss update -g %s -n %s --set virtualMachineProfile.networkProfile='%s'", resourceGroup, vmssName, escapedNetworkProfileJSON)
-    fmt.Println("Command to update VMSS: ", command)
-    _, err = runCommand(command)
+    _, err = runAzCommand("vmss", "update", "-g", resourceGroup, "-n", vmssName, "--set", fmt.Sprintf("virtualMachineProfile.networkProfile=%s", networkProfileJSON))
     if err != nil {
         fmt.Printf("Command failed with error: %s\n", err)
         os.Exit(1)
     }
 
-    command = fmt.Sprintf("az vmss update-instances -g %s -n %s --instance-ids '*'", resourceGroup, vmssName)
-    fmt.Println("Command to update VMSS instances: ", command)
-    _, err = runCommand(command)
+    _, err = runAzCommand("vmss", "update-instances", "-g", resourceGroup, "-n", vmssName, "--instance-ids", "*")
     if err != nil {
         fmt.Printf("Command failed with error: %s\n", err)
         os.Exit(1)
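Taken together, the helper now builds argv for the Azure CLI directly and retries up to three times, instead of formatting a shell string for `bash -c`; that is why the `escapedNetworkProfileJSON` quoting lines could be dropped, since the JSON payload is passed as a single argument and never goes through a shell. For reference, the equivalent shell invocations (reassembled from the removed lines; angle-bracket values are placeholders, and the quoting below is only needed when typing them into a shell) look roughly like:

    az vmss list -g <resource-group> --query '[0].name' -o tsv
    az vmss show -g <resource-group> -n <vmss-name>
    az vmss update -g <resource-group> -n <vmss-name> \
        --set "virtualMachineProfile.networkProfile=<network-profile-json>"
    az vmss update-instances -g <resource-group> -n <vmss-name> --instance-ids '*'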
@@ -13,5 +13,6 @@ data:
       - 192.168.0.0/16
       - 100.64.0.0/10
       - 10.244.0.0/16
+      - 10.10.0.0/16
     masqLinkLocal: false
     masqLinkLocalIPv6: true
@@ -11,4 +11,5 @@ data:
       - 192.168.0.0/16
       - 100.64.0.0/10
       - 10.244.0.0/16
+      - 10.10.0.0/16
     masqLinkLocal: true
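Both manifest changes add `10.10.0.0/16` to the non-masquerade CIDR list of an ip-masq-agent style config (the `masqLinkLocal*` keys match that format), presumably so traffic to the node-subnet range is not SNATed in the new E2E run. A quick way to eyeball the rendered config on a test cluster; the namespace and configmap name here are assumptions, not taken from this diff:

    # configmap name is a guess -- check what the test manifests actually create
    kubectl -n kube-system get configmap azure-ip-masq-agent-config -o yaml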