# azure-container-networking/Makefile

.DEFAULT_GOAL := help
# Default platform commands
SHELL = /bin/bash
MKDIR := mkdir -p
RMDIR := rm -rf
ARCHIVE_CMD = tar -czvf
# Default platform extensions
ARCHIVE_EXT = tgz
# Windows specific commands
ifeq ($(OS),Windows_NT)
MKDIR := powershell.exe -NoProfile -Command New-Item -ItemType Directory -Force
RMDIR := powershell.exe -NoProfile -Command Remove-Item -Recurse -Force
endif
# Build defaults.
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
GOOSES ?= "linux windows" # To override at the cli do: GOOSES="\"darwin bsd\""
GOARCHES ?= "amd64 arm64" # To override at the cli do: GOARCHES="\"ppc64 mips\""
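# For example, to build every platform combination in one shot:
#   make all-binaries-platforms GOOSES="\"linux windows\"" GOARCHES="\"amd64 arm64\""
# The escaped inner quotes keep each space-separated list a single make word.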
# Windows specific extensions
# set these based on the GOOS, not the OS
ifeq ($(GOOS),windows)
ARCHIVE_CMD = zip -9lq
ARCHIVE_EXT = zip
EXE_EXT = .exe
endif
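# Note: for windows archives, zip's -9 selects maximum compression, -l converts
# LF line endings to CRLF for text files, and -q suppresses output.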
# Interrogate the git repo and set some variables
REPO_ROOT = $(shell git rev-parse --show-toplevel)
REVISION ?= $(shell git rev-parse --short HEAD)
ACN_VERSION ?= $(shell git describe --exclude "azure-ipam*" --exclude "dropgz*" --exclude "zapai*" --exclude "ipv6-hp-bpf*" --tags --always)
IPV6_HP_BPF_VERSION ?= $(notdir $(shell git describe --match "ipv6-hp-bpf*" --tags --always))
AZURE_IPAM_VERSION ?= $(notdir $(shell git describe --match "azure-ipam*" --tags --always))
CNI_VERSION ?= $(ACN_VERSION)
CNS_VERSION ?= $(ACN_VERSION)
NPM_VERSION ?= $(ACN_VERSION)
ZAPAI_VERSION ?= $(notdir $(shell git describe --match "zapai*" --tags --always))
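# CNI_VERSION, CNS_VERSION and NPM_VERSION track the root ACN_VERSION, while
# azure-ipam, ipv6-hp-bpf and zapai are derived from their own tag prefixes.
# Any of them can be pinned at invocation time, e.g. (illustrative version):
#   make cni-version CNI_VERSION=v1.4.35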
# Build directories.
AZURE_IPAM_DIR = $(REPO_ROOT)/azure-ipam
IPV6_HP_BPF_DIR = $(REPO_ROOT)/bpf-prog/ipv6-hp-bpf
CNI_NET_DIR = $(REPO_ROOT)/cni/network/plugin
CNI_IPAM_DIR = $(REPO_ROOT)/cni/ipam/plugin
STATELESS_CNI_NET_DIR = $(REPO_ROOT)/cni/network/stateless
CNI_IPAMV6_DIR = $(REPO_ROOT)/cni/ipam/pluginv6
CNI_TELEMETRY_DIR = $(REPO_ROOT)/cni/telemetry/service
ACNCLI_DIR = $(REPO_ROOT)/tools/acncli
CNS_DIR = $(REPO_ROOT)/cns/service
NPM_DIR = $(REPO_ROOT)/npm/cmd
OUTPUT_DIR = $(REPO_ROOT)/output
BUILD_DIR = $(OUTPUT_DIR)/$(GOOS)_$(GOARCH)
AZURE_IPAM_BUILD_DIR = $(BUILD_DIR)/azure-ipam
IPV6_HP_BPF_BUILD_DIR = $(BUILD_DIR)/bpf-prog/ipv6-hp-bpf
IMAGE_DIR = $(OUTPUT_DIR)/images
CNI_BUILD_DIR = $(BUILD_DIR)/cni
ACNCLI_BUILD_DIR = $(BUILD_DIR)/acncli
STATELESS_CNI_BUILD_DIR = $(CNI_BUILD_DIR)/stateless
CNI_MULTITENANCY_BUILD_DIR = $(BUILD_DIR)/cni-multitenancy
CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR = $(BUILD_DIR)/cni-multitenancy-transparent-vlan
CNI_SWIFT_BUILD_DIR = $(BUILD_DIR)/cni-swift
CNI_OVERLAY_BUILD_DIR = $(BUILD_DIR)/cni-overlay
CNI_BAREMETAL_BUILD_DIR = $(BUILD_DIR)/cni-baremetal
CNI_DUALSTACK_BUILD_DIR = $(BUILD_DIR)/cni-dualstack
CNS_BUILD_DIR = $(BUILD_DIR)/cns
NPM_BUILD_DIR = $(BUILD_DIR)/npm
TOOLS_DIR = $(REPO_ROOT)/build/tools
TOOLS_BIN_DIR = $(TOOLS_DIR)/bin
CNI_AI_ID = 5515a1eb-b2bc-406a-98eb-ba462e6f0411
CNS_AI_ID = ce672799-8f08-4235-8c12-08563dc2acef
NPM_AI_ID = 014c22bd-4107-459e-8475-67909e96edcb
ACN_PACKAGE_PATH = github.com/Azure/azure-container-networking
CNI_AI_PATH=$(ACN_PACKAGE_PATH)/telemetry.aiMetadata
CNS_AI_PATH=$(ACN_PACKAGE_PATH)/cns/logger.aiMetadata
NPM_AI_PATH=$(ACN_PACKAGE_PATH)/npm.aiMetadata
# Tool paths
CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen
GOCOV := $(TOOLS_BIN_DIR)/gocov
GOCOV_XML := $(TOOLS_BIN_DIR)/gocov-xml
GOFUMPT := $(TOOLS_BIN_DIR)/gofumpt
GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
GO_JUNIT_REPORT := $(TOOLS_BIN_DIR)/go-junit-report
MOCKGEN := $(TOOLS_BIN_DIR)/mockgen
# Archive file names.
ACNCLI_ARCHIVE_NAME = acncli-$(GOOS)-$(GOARCH)-$(ACN_VERSION).$(ARCHIVE_EXT)
CNI_ARCHIVE_NAME = azure-vnet-cni-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNI_MULTITENANCY_ARCHIVE_NAME = azure-vnet-cni-multitenancy-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNI_MULTITENANCY_TRANSPARENT_VLAN_ARCHIVE_NAME = azure-vnet-cni-multitenancy-transparent-vlan-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNI_SWIFT_ARCHIVE_NAME = azure-vnet-cni-swift-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNI_OVERLAY_ARCHIVE_NAME = azure-vnet-cni-overlay-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNI_BAREMETAL_ARCHIVE_NAME = azure-vnet-cni-baremetal-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNI_DUALSTACK_ARCHIVE_NAME = azure-vnet-cni-overlay-dualstack-$(GOOS)-$(GOARCH)-$(CNI_VERSION).$(ARCHIVE_EXT)
CNS_ARCHIVE_NAME = azure-cns-$(GOOS)-$(GOARCH)-$(CNS_VERSION).$(ARCHIVE_EXT)
NPM_ARCHIVE_NAME = azure-npm-$(GOOS)-$(GOARCH)-$(NPM_VERSION).$(ARCHIVE_EXT)
AZURE_IPAM_ARCHIVE_NAME = azure-ipam-$(GOOS)-$(GOARCH)-$(AZURE_IPAM_VERSION).$(ARCHIVE_EXT)
IPV6_HP_BPF_ARCHIVE_NAME = ipv6-hp-bpf-$(GOOS)-$(GOARCH)-$(IPV6_HP_BPF_VERSION).$(ARCHIVE_EXT)
# Image info file names.
CNI_IMAGE_INFO_FILE = azure-cni-$(CNI_VERSION).txt
CNS_IMAGE_INFO_FILE = azure-cns-$(CNS_VERSION).txt
NPM_IMAGE_INFO_FILE = azure-npm-$(NPM_VERSION).txt
# Default target
all-binaries-platforms: ## Make all platform binaries
@for goos in "$(GOOSES)"; do \
for goarch in "$(GOARCHES)"; do \
make all-binaries GOOS=$$goos GOARCH=$$goarch; \
done \
done
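# The nested loop re-invokes this Makefile once per GOOS/GOARCH pair, so the
# default lists expand to the equivalent of:
#   make all-binaries GOOS=linux GOARCH=amd64
#   make all-binaries GOOS=linux GOARCH=arm64
#   ... and so on for windows.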
# OS specific binaries/images
ifeq ($(GOOS),linux)
all-binaries: acncli azure-cni-plugin azure-cns azure-npm azure-ipam ipv6-hp-bpf
all-images: npm-image cns-image cni-manager-image ipv6-hp-bpf-image
else
all-binaries: azure-cni-plugin azure-cns azure-npm
all-images:
@echo "Nothing to build. Skip."
endif
# Shorthand target names for convenience.
azure-cni-plugin: azure-vnet-binary azure-vnet-stateless-binary azure-vnet-ipam-binary azure-vnet-ipamv6-binary azure-vnet-telemetry-binary cni-archive
azure-cns: azure-cns-binary cns-archive
acncli: acncli-binary acncli-archive
azure-npm: azure-npm-binary npm-archive
azure-ipam: azure-ipam-binary azure-ipam-archive
ipv6-hp-bpf: ipv6-hp-bpf-binary ipv6-hp-bpf-archive
##@ Versioning
revision: ## print the current git revision
@echo $(REVISION)
version: ## prints the root version
@echo $(ACN_VERSION)
acncli-version: version
azure-ipam-version: ## prints the azure-ipam version
@echo $(AZURE_IPAM_VERSION)
ipv6-hp-bpf-version: ## prints the ipv6-hp-bpf version
@echo $(IPV6_HP_BPF_VERSION)
cni-version: ## prints the cni version
@echo $(CNI_VERSION)
cns-version: ## prints the cns version
@echo $(CNS_VERSION)
npm-version: ## prints the npm version
@echo $(NPM_VERSION)
zapai-version: ## prints the zapai version
@echo $(ZAPAI_VERSION)
##@ Binaries
# Build the delegated IPAM plugin binary.
azure-ipam-binary:
cd $(AZURE_IPAM_DIR) && CGO_ENABLED=0 go build -v -o $(AZURE_IPAM_BUILD_DIR)/azure-ipam$(EXE_EXT) -ldflags "-X github.com/Azure/azure-container-networking/azure-ipam/internal/buildinfo.Version=$(AZURE_IPAM_VERSION)" -gcflags="-dwarflocationlists=true"
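# Note: the -X linker flag stamps the version into the named package variable at
# link time, so the binary reports its build version without source changes;
# -dwarflocationlists=true improves DWARF debug info for optimized binaries.
# The same pattern applies to all of the go build recipes below.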
# Build the ipv6-hp-bpf binary.
ipv6-hp-bpf-binary:
cd $(IPV6_HP_BPF_DIR) && CGO_ENABLED=0 go generate ./...
cd $(IPV6_HP_BPF_DIR)/cmd/ipv6-hp-bpf && CGO_ENABLED=0 go build -v -o $(IPV6_HP_BPF_BUILD_DIR)/ipv6-hp-bpf$(EXE_EXT) -ldflags "-X main.version=$(IPV6_HP_BPF_VERSION)" -gcflags="-dwarflocationlists=true"
# Libraries for ipv6-hp-bpf
ipv6-hp-bpf-lib:
ifeq ($(GOARCH),amd64)
sudo apt-get update && sudo apt-get install -y llvm clang linux-libc-dev linux-headers-generic libbpf-dev libc6-dev nftables iproute2 gcc-multilib
for dir in /usr/include/x86_64-linux-gnu/*; do sudo ln -sfn "$$dir" /usr/include/$$(basename "$$dir"); done
else ifeq ($(GOARCH),arm64)
sudo apt-get update && sudo apt-get install -y llvm clang linux-libc-dev linux-headers-generic libbpf-dev libc6-dev nftables iproute2 gcc-aarch64-linux-gnu
for dir in /usr/include/aarch64-linux-gnu/*; do sudo ln -sfn "$$dir" /usr/include/$$(basename "$$dir"); done
endif
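# The symlink loop above exposes the arch-specific system headers (e.g.
# /usr/include/x86_64-linux-gnu/asm) directly under /usr/include so that clang
# can resolve them when compiling the BPF programs.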
# Build the Azure CNI network binary.
azure-vnet-binary:
cd $(CNI_NET_DIR) && CGO_ENABLED=0 go build -v -o $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) -ldflags "-X main.version=$(CNI_VERSION)" -gcflags="-dwarflocationlists=true"
# Build the Azure CNI stateless network binary
azure-vnet-stateless-binary:
cd $(STATELESS_CNI_NET_DIR) && CGO_ENABLED=0 go build -v -o $(STATELESS_CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) -ldflags "-X main.version=$(CNI_VERSION)" -gcflags="-dwarflocationlists=true"
# Build the Azure CNI IPAM binary.
azure-vnet-ipam-binary:
cd $(CNI_IPAM_DIR) && CGO_ENABLED=0 go build -v -o $(CNI_BUILD_DIR)/azure-vnet-ipam$(EXE_EXT) -ldflags "-X main.version=$(CNI_VERSION)" -gcflags="-dwarflocationlists=true"
# Build the Azure CNI IPAMV6 binary.
azure-vnet-ipamv6-binary:
cd $(CNI_IPAMV6_DIR) && CGO_ENABLED=0 go build -v -o $(CNI_BUILD_DIR)/azure-vnet-ipamv6$(EXE_EXT) -ldflags "-X main.version=$(CNI_VERSION)" -gcflags="-dwarflocationlists=true"
# Build the Azure CNI telemetry binary.
azure-vnet-telemetry-binary:
cd $(CNI_TELEMETRY_DIR) && CGO_ENABLED=0 go build -v -o $(CNI_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) -ldflags "-X main.version=$(CNI_VERSION) -X $(CNI_AI_PATH)=$(CNI_AI_ID)" -gcflags="-dwarflocationlists=true"
# Build the Azure CLI network binary.
acncli-binary:
cd $(ACNCLI_DIR) && CGO_ENABLED=0 go build -v -o $(ACNCLI_BUILD_DIR)/acn$(EXE_EXT) -ldflags "-X main.version=$(ACN_VERSION)" -gcflags="-dwarflocationlists=true"
# Build the Azure CNS binary.
azure-cns-binary:
cd $(CNS_DIR) && CGO_ENABLED=0 go build -v -o $(CNS_BUILD_DIR)/azure-cns$(EXE_EXT) -ldflags "-X main.version=$(CNS_VERSION) -X $(CNS_AI_PATH)=$(CNS_AI_ID) -X $(CNI_AI_PATH)=$(CNI_AI_ID)" -gcflags="-dwarflocationlists=true"
# Build the Azure NPM binary.
azure-npm-binary:
cd $(CNI_TELEMETRY_DIR) && CGO_ENABLED=0 go build -v -o $(NPM_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) -ldflags "-X main.version=$(NPM_VERSION)" -gcflags="-dwarflocationlists=true"
cd $(NPM_DIR) && CGO_ENABLED=0 go build -v -o $(NPM_BUILD_DIR)/azure-npm$(EXE_EXT) -ldflags "-X main.version=$(NPM_VERSION) -X $(NPM_AI_PATH)=$(NPM_AI_ID)" -gcflags="-dwarflocationlists=true"
##@ Containers
## Common variables for all containers.
IMAGE_REGISTRY ?= acnpublic.azurecr.io
OS ?= $(GOOS)
ARCH ?= $(GOARCH)
PLATFORM ?= $(OS)/$(ARCH)
BUILDX_ACTION ?= --load
CONTAINER_BUILDER ?= buildah
CONTAINER_RUNTIME ?= podman
CONTAINER_TRANSPORT ?= skopeo
# prefer buildah, if available, but fall back to docker if that binary is not in the path or on Windows.
ifeq (, $(shell which $(CONTAINER_BUILDER)))
CONTAINER_BUILDER = docker
endif
ifeq ($(OS), windows)
CONTAINER_BUILDER = docker
endif
# prefer podman, if available, but fall back to docker if that binary is not in the path or on Windows.
ifeq (, $(shell which $(CONTAINER_RUNTIME)))
CONTAINER_RUNTIME = docker
endif
ifeq ($(OS), windows)
CONTAINER_RUNTIME = docker
endif
# prefer skopeo, if available, but fall back to docker if that binary is not in the path or on Windows.
ifeq (, $(shell which $(CONTAINER_TRANSPORT)))
CONTAINER_TRANSPORT = docker
endif
ifeq ($(OS), windows)
CONTAINER_TRANSPORT = docker
endif
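# All three tools can also be forced from the command line, e.g. to use docker
# everywhere regardless of what is installed:
#   make cni-image CONTAINER_BUILDER=docker CONTAINER_RUNTIME=docker CONTAINER_TRANSPORT=docker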
## Image name definitions.
ACNCLI_IMAGE = acncli
AZURE_IPAM_IMAGE = azure-ipam
IPV6_HP_BPF_IMAGE = ipv6-hp-bpf
CNI_IMAGE = azure-cni
CNS_IMAGE = azure-cns
NPM_IMAGE = azure-npm
## Image platform tags.
ACNCLI_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(ACN_VERSION)
AZURE_IPAM_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(AZURE_IPAM_VERSION)
AZURE_IPAM_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(AZURE_IPAM_VERSION)-$(OS_SKU_WIN)
IPV6_HP_BPF_IMAGE_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(IPV6_HP_BPF_VERSION)
CNI_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNI_VERSION)
CNI_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNI_VERSION)-$(OS_SKU_WIN)
CNS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNS_VERSION)
CNS_WINDOWS_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(CNS_VERSION)-$(OS_SKU_WIN)
NPM_PLATFORM_TAG ?= $(subst /,-,$(PLATFORM))-$(NPM_VERSION)
qemu-user-static: ## Set up the host to run qemu multiplatform container builds.
sudo $(CONTAINER_RUNTIME) run --rm --privileged multiarch/qemu-user-static --reset -p yes
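# qemu-user-static registers binfmt_misc handlers on the host so that foreign-
# architecture container builds run under qemu emulation, e.g.:
#   make qemu-user-static
#   make cni-image PLATFORM=linux/arm64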
## Reusable build targets for building individual container images.
container-buildah: # util target to build container images using buildah. do not invoke directly.
buildah bud \
--build-arg ARCH=$(ARCH) \
--build-arg OS=$(OS) \
--build-arg PLATFORM=$(PLATFORM) \
--build-arg VERSION=$(TAG) \
$(EXTRA_BUILD_ARGS) \
--jobs 16 \
--platform $(PLATFORM) \
--target $(TARGET) \
-f $(DOCKERFILE) \
-t $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) \
.
buildah push $(IMAGE_REGISTRY)/$(IMAGE):$(TAG)
container-docker: # util target to build container images using docker buildx. do not invoke directly.
docker buildx create --use --driver-opt image=mcr.microsoft.com/oss/v2/moby/buildkit:v0.16.0-2 --platform $(PLATFORM)
docker buildx build \
$(BUILDX_ACTION) \
--build-arg ARCH=$(ARCH) \
--build-arg OS=$(OS) \
--build-arg PLATFORM=$(PLATFORM) \
--build-arg VERSION=$(TAG) \
$(EXTRA_BUILD_ARGS) \
--platform $(PLATFORM) \
--target $(TARGET) \
-f $(DOCKERFILE) \
-t $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) \
.
container: # util target to build container images. do not invoke directly.
$(MAKE) container-$(CONTAINER_BUILDER) \
ARCH=$(ARCH) \
OS=$(OS) \
PLATFORM=$(PLATFORM) \
TAG=$(TAG) \
TARGET=$(TARGET)
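# The *-image targets below funnel through `container`, which dispatches to
# container-buildah or container-docker based on CONTAINER_BUILDER. A manual
# invocation (normally composed for you, shown here with an illustrative tag)
# would look roughly like:
#   make container DOCKERFILE=cni/Dockerfile IMAGE=azure-cni TAG=linux-amd64-v1.4.35 TARGET=linux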
container-push: # util target to publish container image. do not invoke directly.
$(CONTAINER_BUILDER) push \
$(IMAGE_REGISTRY)/$(IMAGE):$(TAG)
container-pull: # util target to pull container image. do not invoke directly.
$(CONTAINER_BUILDER) pull \
$(IMAGE_REGISTRY)/$(IMAGE):$(TAG)
## Build specific container images.
# acncli
acncli-image-name: # util target to print the CNI manager image name.
@echo $(ACNCLI_IMAGE)
acncli-image-name-and-tag: # util target to print the CNI manager image name and tag.
@echo $(IMAGE_REGISTRY)/$(ACNCLI_IMAGE):$(ACNCLI_PLATFORM_TAG)
acncli-image: ## build cni-manager container image.
$(MAKE) container \
DOCKERFILE=tools/acncli/Dockerfile \
IMAGE=$(ACNCLI_IMAGE) \
TAG=$(ACNCLI_PLATFORM_TAG)
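# Example: build and push to a different registry (illustrative registry name):
#   make acncli-image acncli-image-push IMAGE_REGISTRY=example.azurecr.io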
acncli-image-push: ## push cni-manager container image.
$(MAKE) container-push \
IMAGE=$(ACNCLI_IMAGE) \
TAG=$(ACNCLI_PLATFORM_TAG)
acncli-image-pull: ## pull cni-manager container image.
$(MAKE) container-pull \
IMAGE=$(ACNCLI_IMAGE) \
TAG=$(ACNCLI_PLATFORM_TAG)
# azure-ipam
azure-ipam-image-name: # util target to print the azure-ipam image name.
@echo $(AZURE_IPAM_IMAGE)
azure-ipam-image-name-and-tag: # util target to print the azure-ipam image name and tag.
@echo $(IMAGE_REGISTRY)/$(AZURE_IPAM_IMAGE):$(AZURE_IPAM_PLATFORM_TAG)
azure-ipam-image: ## build azure-ipam container image.
$(MAKE) container \
DOCKERFILE=azure-ipam/Dockerfile \
IMAGE=$(AZURE_IPAM_IMAGE) \
PLATFORM=$(PLATFORM) \
TAG=$(AZURE_IPAM_PLATFORM_TAG) \
TARGET=$(OS) \
OS=$(OS) \
ARCH=$(ARCH)
azure-ipam-image-push: ## push azure-ipam container image.
$(MAKE) container-push \
IMAGE=$(AZURE_IPAM_IMAGE) \
TAG=$(AZURE_IPAM_PLATFORM_TAG)
azure-ipam-image-pull: ## pull azure-ipam container image.
$(MAKE) container-pull \
IMAGE=$(AZURE_IPAM_IMAGE) \
TAG=$(AZURE_IPAM_PLATFORM_TAG)
# ipv6-hp-bpf
ipv6-hp-bpf-image-name: # util target to print the ipv6-hp-bpf image name.
@echo $(IPV6_HP_BPF_IMAGE)
ipv6-hp-bpf-image-name-and-tag: # util target to print the ipv6-hp-bpf image name and tag.
@echo $(IMAGE_REGISTRY)/$(IPV6_HP_BPF_IMAGE):$(IPV6_HP_BPF_IMAGE_PLATFORM_TAG)
ipv6-hp-bpf-image: ## build ipv6-hp-bpf container image.
$(MAKE) container \
DOCKERFILE=bpf-prog/ipv6-hp-bpf/$(OS).Dockerfile \
IMAGE=$(IPV6_HP_BPF_IMAGE) \
EXTRA_BUILD_ARGS='--build-arg OS=$(OS) --build-arg ARCH=$(ARCH) --build-arg DEBUG=$(DEBUG)'\
PLATFORM=$(PLATFORM) \
TAG=$(IPV6_HP_BPF_IMAGE_PLATFORM_TAG) \
TARGET=$(OS) \
OS=$(OS) \
ARCH=$(ARCH)
ipv6-hp-bpf-image-push: ## push ipv6-hp-bpf container image.
$(MAKE) container-push \
IMAGE=$(IPV6_HP_BPF_IMAGE) \
TAG=$(IPV6_HP_BPF_IMAGE_PLATFORM_TAG)
ipv6-hp-bpf-image-pull: ## pull ipv6-hp-bpf container image.
$(MAKE) container-pull \
IMAGE=$(IPV6_HP_BPF_IMAGE) \
TAG=$(IPV6_HP_BPF_IMAGE_PLATFORM_TAG)
# cni
cni-image-name: # util target to print the cni image name.
@echo $(CNI_IMAGE)
cni-image-name-and-tag: # util target to print the cni image name and tag.
@echo $(IMAGE_REGISTRY)/$(CNI_IMAGE):$(CNI_PLATFORM_TAG)
cni-image: ## build cni container image.
$(MAKE) container \
DOCKERFILE=cni/Dockerfile \
IMAGE=$(CNI_IMAGE) \
PLATFORM=$(PLATFORM) \
TAG=$(CNI_PLATFORM_TAG) \
TARGET=$(OS) \
OS=$(OS) \
ARCH=$(ARCH) \
EXTRA_BUILD_ARGS='--build-arg CNI_AI_PATH=$(CNI_AI_PATH) --build-arg CNI_AI_ID=$(CNI_AI_ID)'
cni-image-push: ## push cni container image.
$(MAKE) container-push \
IMAGE=$(CNI_IMAGE) \
TAG=$(CNI_PLATFORM_TAG)
cni-image-pull: ## pull cni container image.
$(MAKE) container-pull \
IMAGE=$(CNI_IMAGE) \
TAG=$(CNI_PLATFORM_TAG)
# cns
cns-image-name: # util target to print the CNS image name
@echo $(CNS_IMAGE)
cns-image-name-and-tag: # util target to print the CNS image name and tag.
@echo $(IMAGE_REGISTRY)/$(CNS_IMAGE):$(CNS_PLATFORM_TAG)
cns-image: ## build cns container image.
$(MAKE) container \
DOCKERFILE=cns/Dockerfile \
IMAGE=$(CNS_IMAGE) \
EXTRA_BUILD_ARGS='--build-arg CNS_AI_PATH=$(CNS_AI_PATH) --build-arg CNS_AI_ID=$(CNS_AI_ID)' \
PLATFORM=$(PLATFORM) \
TAG=$(CNS_PLATFORM_TAG) \
TARGET=$(OS) \
OS=$(OS) \
ARCH=$(ARCH)
cns-image-push: ## push cns container image.
$(MAKE) container-push \
IMAGE=$(CNS_IMAGE) \
TAG=$(CNS_PLATFORM_TAG)
cns-image-pull: ## pull cns container image.
$(MAKE) container-pull \
IMAGE=$(CNS_IMAGE) \
TAG=$(CNS_PLATFORM_TAG)
# npm
npm-image-name: # util target to print the NPM image name
@echo $(NPM_IMAGE)
npm-image-name-and-tag: # util target to print the NPM image name and tag.
@echo $(IMAGE_REGISTRY)/$(NPM_IMAGE):$(NPM_PLATFORM_TAG)
npm-image: ## build the npm container image.
$(MAKE) container-$(CONTAINER_BUILDER) \
DOCKERFILE=npm/$(OS).Dockerfile \
IMAGE=$(NPM_IMAGE) \
EXTRA_BUILD_ARGS='--build-arg NPM_AI_PATH=$(NPM_AI_PATH) --build-arg NPM_AI_ID=$(NPM_AI_ID)' \
PLATFORM=$(PLATFORM) \
TAG=$(NPM_PLATFORM_TAG) \
TARGET=$(OS) \
OS=$(OS) \
ARCH=$(ARCH)
npm-image-push: ## push npm container image.
$(MAKE) container-push \
IMAGE=$(NPM_IMAGE) \
TAG=$(NPM_PLATFORM_TAG)
npm-image-pull: ## pull npm container image.
$(MAKE) container-pull \
IMAGE=$(NPM_IMAGE) \
TAG=$(NPM_PLATFORM_TAG)
## Reusable targets for building multiplat container image manifests.
IMAGE_ARCHIVE_DIR ?= $(shell pwd)
manifest-create:
$(CONTAINER_BUILDER) manifest create $(IMAGE_REGISTRY)/$(IMAGE):$(TAG)
manifest-add:
$(CONTAINER_BUILDER) manifest add --os=$(OS) $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):$(subst /,-,$(PLATFORM))-$(TAG)
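# manifest-add derives the per-platform tag from the manifest tag; e.g. with
# IMAGE=azure-cni TAG=v1.4.35 (illustrative) and PLATFORM=linux/arm64 it adds
# $(IMAGE_REGISTRY)/azure-cni:linux-arm64-v1.4.35 to the :v1.4.35 manifest.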
manifest-build: # util target to compose multiarch container manifests from platform specific images.
$(MAKE) manifest-create
$(foreach PLATFORM,$(PLATFORMS),\
$(if $(filter $(PLATFORM),windows/amd64),\
$(MAKE) manifest-add CONTAINER_BUILDER=$(CONTAINER_BUILDER) OS=windows OS_VERSION=$(OS_VERSION) PLATFORM=$(PLATFORM);,\
$(MAKE) manifest-add PLATFORM=$(PLATFORM);\
)\
)
manifest-push: # util target to push multiarch container manifest.
$(CONTAINER_BUILDER) manifest push --all $(IMAGE_REGISTRY)/$(IMAGE):$(TAG) docker://$(IMAGE_REGISTRY)/$(IMAGE):$(TAG)
manifest-skopeo-archive: # util target to export tar archive of multiarch container manifest.
skopeo copy --all docker://$(IMAGE_REGISTRY)/$(IMAGE):$(TAG) oci-archive:$(IMAGE_ARCHIVE_DIR)/$(IMAGE)-$(TAG).tar --debug
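# Example: export the multiarch cni manifest as an OCI archive into /tmp:
#   make cni-skopeo-archive IMAGE_ARCHIVE_DIR=/tmp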
## Build specific multiplat images.
acncli-manifest-build: ## build acncli multiplat container manifest.
$(MAKE) manifest-build \
PLATFORMS="$(PLATFORMS)" \
IMAGE=$(ACNCLI_IMAGE) \
TAG=$(ACN_VERSION)
acncli-manifest-push: ## push acncli multiplat container manifest
$(MAKE) manifest-push \
IMAGE=$(ACNCLI_IMAGE) \
TAG=$(ACN_VERSION)
acncli-skopeo-archive: ## export tar archive of acncli multiplat container manifest.
$(MAKE) manifest-skopeo-archive \
IMAGE=$(ACNCLI_IMAGE) \
TAG=$(ACN_VERSION)
azure-ipam-manifest-build: ## build azure-ipam multiplat container manifest.
$(MAKE) manifest-build \
PLATFORMS="$(PLATFORMS)" \
IMAGE=$(AZURE_IPAM_IMAGE) \
TAG=$(AZURE_IPAM_VERSION)
azure-ipam-manifest-push: ## push azure-ipam multiplat container manifest
$(MAKE) manifest-push \
IMAGE=$(AZURE_IPAM_IMAGE) \
TAG=$(AZURE_IPAM_VERSION)
azure-ipam-skopeo-archive: ## export tar archive of azure-ipam multiplat container manifest.
$(MAKE) manifest-skopeo-archive \
IMAGE=$(AZURE_IPAM_IMAGE) \
TAG=$(AZURE_IPAM_VERSION)
ipv6-hp-bpf-manifest-build: ## build ipv6-hp-bpf multiplat container manifest.
$(MAKE) manifest-build \
PLATFORMS="$(PLATFORMS)" \
IMAGE=$(IPV6_HP_BPF_IMAGE) \
TAG=$(IPV6_HP_BPF_VERSION)
ipv6-hp-bpf-manifest-push: ## push ipv6-hp-bpf multiplat container manifest
$(MAKE) manifest-push \
IMAGE=$(IPV6_HP_BPF_IMAGE) \
TAG=$(IPV6_HP_BPF_VERSION)
ipv6-hp-bpf-skopeo-archive: ## export tar archive of ipv6-hp-bpf multiplat container manifest.
$(MAKE) manifest-skopeo-archive \
IMAGE=$(IPV6_HP_BPF_IMAGE) \
TAG=$(IPV6_HP_BPF_VERSION)
cni-manifest-build: ## build cni multiplat container manifest.
$(MAKE) manifest-build \
PLATFORMS="$(PLATFORMS)" \
IMAGE=$(CNI_IMAGE) \
TAG=$(CNI_VERSION)
cni-manifest-push: ## push cni multiplat container manifest
$(MAKE) manifest-push \
IMAGE=$(CNI_IMAGE) \
TAG=$(CNI_VERSION)
cni-skopeo-archive: ## export tar archive of cni multiplat container manifest.
$(MAKE) manifest-skopeo-archive \
IMAGE=$(CNI_IMAGE) \
TAG=$(CNI_VERSION)
cns-manifest-build: ## build azure-cns multiplat container manifest.
$(MAKE) manifest-build \
PLATFORMS="$(PLATFORMS)" \
IMAGE=$(CNS_IMAGE) \
TAG=$(CNS_VERSION)
cns-manifest-push: ## push cns multiplat container manifest
$(MAKE) manifest-push \
IMAGE=$(CNS_IMAGE) \
TAG=$(CNS_VERSION)
cns-skopeo-archive: ## export tar archive of cns multiplat container manifest.
$(MAKE) manifest-skopeo-archive \
IMAGE=$(CNS_IMAGE) \
TAG=$(CNS_VERSION)
npm-manifest-build: ## build azure-npm multiplat container manifest.
$(MAKE) manifest-build \
PLATFORMS="$(PLATFORMS)" \
IMAGE=$(NPM_IMAGE) \
TAG=$(NPM_VERSION)
npm-manifest-push: ## push npm multiplat container manifest
$(MAKE) manifest-push \
IMAGE=$(NPM_IMAGE) \
TAG=$(NPM_VERSION)
npm-skopeo-archive: ## export tar archive of npm multiplat container manifest.
$(MAKE) manifest-skopeo-archive \
IMAGE=$(NPM_IMAGE) \
TAG=$(NPM_VERSION)
##@ Archives
# Create a CNI archive for the target platform.
.PHONY: cni-archive
cni-archive: azure-vnet-binary azure-vnet-ipam-binary azure-vnet-ipamv6-binary azure-vnet-telemetry-binary
$(MKDIR) $(CNI_BUILD_DIR)
cp cni/azure-$(GOOS).conflist $(CNI_BUILD_DIR)/10-azure.conflist
cp telemetry/azure-vnet-telemetry.config $(CNI_BUILD_DIR)/azure-vnet-telemetry.config
cp $(STATELESS_CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-stateless$(EXE_EXT)
cd $(CNI_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_ARCHIVE_NAME) azure-vnet$(EXE_EXT) azure-vnet-stateless$(EXE_EXT) azure-vnet-ipam$(EXE_EXT) azure-vnet-ipamv6$(EXE_EXT) azure-vnet-telemetry$(EXE_EXT) 10-azure.conflist azure-vnet-telemetry.config
$(MKDIR) $(CNI_MULTITENANCY_BUILD_DIR)
cp cni/azure-$(GOOS)-multitenancy.conflist $(CNI_MULTITENANCY_BUILD_DIR)/10-azure.conflist
cp $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-ipam$(EXE_EXT) $(CNI_MULTITENANCY_BUILD_DIR)
ifeq ($(GOOS),linux)
cp telemetry/azure-vnet-telemetry.config $(CNI_MULTITENANCY_BUILD_DIR)/azure-vnet-telemetry.config
cp $(CNI_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) $(CNI_MULTITENANCY_BUILD_DIR)
endif
cd $(CNI_MULTITENANCY_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_MULTITENANCY_ARCHIVE_NAME) azure-vnet$(EXE_EXT) azure-vnet-ipam$(EXE_EXT) azure-vnet-telemetry$(EXE_EXT) 10-azure.conflist azure-vnet-telemetry.config
ifeq ($(GOOS),linux)
$(MKDIR) $(CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR)
cp cni/azure-$(GOOS)-multitenancy-transparent-vlan.conflist $(CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR)/10-azure.conflist
cp $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR)
cp telemetry/azure-vnet-telemetry.config $(CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR)/azure-vnet-telemetry.config
cp $(CNI_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) $(CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR)
cd $(CNI_MULTITENANCY_TRANSPARENT_VLAN_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_MULTITENANCY_TRANSPARENT_VLAN_ARCHIVE_NAME) azure-vnet$(EXE_EXT) azure-vnet-telemetry$(EXE_EXT) 10-azure.conflist azure-vnet-telemetry.config
endif
$(MKDIR) $(CNI_SWIFT_BUILD_DIR)
cp cni/azure-$(GOOS)-swift.conflist $(CNI_SWIFT_BUILD_DIR)/10-azure.conflist
cp telemetry/azure-vnet-telemetry.config $(CNI_SWIFT_BUILD_DIR)/azure-vnet-telemetry.config
cp $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-ipam$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) $(CNI_SWIFT_BUILD_DIR)
cp $(STATELESS_CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_SWIFT_BUILD_DIR)/azure-vnet-stateless$(EXE_EXT)
cd $(CNI_SWIFT_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_SWIFT_ARCHIVE_NAME) azure-vnet$(EXE_EXT) azure-vnet-stateless$(EXE_EXT) azure-vnet-ipam$(EXE_EXT) azure-vnet-telemetry$(EXE_EXT) 10-azure.conflist azure-vnet-telemetry.config
$(MKDIR) $(CNI_OVERLAY_BUILD_DIR)
cp cni/azure-$(GOOS)-swift-overlay.conflist $(CNI_OVERLAY_BUILD_DIR)/10-azure.conflist
cp telemetry/azure-vnet-telemetry.config $(CNI_OVERLAY_BUILD_DIR)/azure-vnet-telemetry.config
cp $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-ipam$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) $(CNI_OVERLAY_BUILD_DIR)
cp $(STATELESS_CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_OVERLAY_BUILD_DIR)/azure-vnet-stateless$(EXE_EXT)
cd $(CNI_OVERLAY_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_OVERLAY_ARCHIVE_NAME) azure-vnet$(EXE_EXT) azure-vnet-stateless$(EXE_EXT) azure-vnet-ipam$(EXE_EXT) azure-vnet-telemetry$(EXE_EXT) 10-azure.conflist azure-vnet-telemetry.config
$(MKDIR) $(CNI_DUALSTACK_BUILD_DIR)
cp cni/azure-$(GOOS)-swift-overlay-dualstack.conflist $(CNI_DUALSTACK_BUILD_DIR)/10-azure.conflist
cp telemetry/azure-vnet-telemetry.config $(CNI_DUALSTACK_BUILD_DIR)/azure-vnet-telemetry.config
cp $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_BUILD_DIR)/azure-vnet-telemetry$(EXE_EXT) $(CNI_DUALSTACK_BUILD_DIR)
cp $(STATELESS_CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_DUALSTACK_BUILD_DIR)/azure-vnet-stateless$(EXE_EXT)
cd $(CNI_DUALSTACK_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_DUALSTACK_ARCHIVE_NAME) azure-vnet$(EXE_EXT) azure-vnet-stateless$(EXE_EXT) azure-vnet-telemetry$(EXE_EXT) 10-azure.conflist azure-vnet-telemetry.config
# baremetal mode is windows only (at least for now)
ifeq ($(GOOS),windows)
$(MKDIR) $(CNI_BAREMETAL_BUILD_DIR)
cp cni/azure-$(GOOS)-baremetal.conflist $(CNI_BAREMETAL_BUILD_DIR)/10-azure.conflist
cp $(CNI_BUILD_DIR)/azure-vnet$(EXE_EXT) $(CNI_BAREMETAL_BUILD_DIR)
cd $(CNI_BAREMETAL_BUILD_DIR) && $(ARCHIVE_CMD) $(CNI_BAREMETAL_ARCHIVE_NAME) azure-vnet$(EXE_EXT) 10-azure.conflist
endif
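# Each archive lands in its per-platform build directory using the
# *_ARCHIVE_NAME variables above; e.g. a linux/amd64 build produces
# output/linux_amd64/cni/azure-vnet-cni-linux-amd64-$(CNI_VERSION).tgz.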
# Create a cli archive for the target platform.
.PHONY: acncli-archive
acncli-archive: acncli-binary
ifeq ($(GOOS),linux)
$(MKDIR) $(ACNCLI_BUILD_DIR)
cd $(ACNCLI_BUILD_DIR) && $(ARCHIVE_CMD) $(ACNCLI_ARCHIVE_NAME) acn$(EXE_EXT)
endif
# Create a CNS archive for the target platform.
.PHONY: cns-archive
cns-archive: azure-cns-binary
cp cns/configuration/cns_config.json $(CNS_BUILD_DIR)/cns_config.json
cd $(CNS_BUILD_DIR) && $(ARCHIVE_CMD) $(CNS_ARCHIVE_NAME) azure-cns$(EXE_EXT) cns_config.json
# Create an NPM archive for the target platform. Only Linux is supported for now.
.PHONY: npm-archive
npm-archive: azure-npm-binary
ifeq ($(GOOS),linux)
cd $(NPM_BUILD_DIR) && $(ARCHIVE_CMD) $(NPM_ARCHIVE_NAME) azure-npm$(EXE_EXT)
endif
# Create an azure-ipam archive for the target platform.
.PHONY: azure-ipam-archive
azure-ipam-archive: azure-ipam-binary
ifeq ($(GOOS),linux)
$(MKDIR) $(AZURE_IPAM_BUILD_DIR)
cd $(AZURE_IPAM_BUILD_DIR) && $(ARCHIVE_CMD) $(AZURE_IPAM_ARCHIVE_NAME) azure-ipam$(EXE_EXT)
endif
# Create an ipv6-hp-bpf archive for the target platform.
.PHONY: ipv6-hp-bpf-archive
ipv6-hp-bpf-archive: ipv6-hp-bpf-binary
ifeq ($(GOOS),linux)
$(MKDIR) $(IPV6_HP_BPF_BUILD_DIR)
cd $(IPV6_HP_BPF_BUILD_DIR) && $(ARCHIVE_CMD) $(IPV6_HP_BPF_ARCHIVE_NAME) ipv6-hp-bpf$(EXE_EXT)
endif
##@ Utils
clean: ## Clean build artifacts.
$(RMDIR) $(OUTPUT_DIR)
$(RMDIR) $(TOOLS_BIN_DIR)
$(RMDIR) go.work*
LINT_PKG ?= .
lint: $(GOLANGCI_LINT) ## Fast lint vs default branch showing only new issues.
GOGC=20 $(GOLANGCI_LINT) run --timeout 25m -v $(LINT_PKG)/...
lint-all: $(GOLANGCI_LINT) ## Lint the current branch in entirety.
GOGC=20 $(GOLANGCI_LINT) run -v $(LINT_PKG)/...
FMT_PKG ?= cni cns npm
fmt: $(GOFUMPT) ## run gofumpt on $FMT_PKG (default "cni cns npm").
$(GOFUMPT) -s -w $(FMT_PKG)
workspace: ## Set up the Go workspace.
go work init
go work use .
go work use ./azure-ipam
go work use ./build/tools
go work use ./dropgz
go work use ./zapai
##@ Test
COVER_PKG ?= .
# Restart case is used by the cni load test pipeline to restart the cluster's nodes.
RESTART_CASE ?= false
# CNI type is a key that selects the type of state validation done on a cluster.
CNI_TYPE ?= cilium
test-all: ## run all unit tests.
@$(eval COVER_FILTER=`go list $(COVER_PKG)/... | tr '\n' ','`)
@echo Test coverpkg: $(COVER_FILTER)
go test -mod=readonly -buildvcs=false -tags "unit" --skip 'TestE2E*' -coverpkg=$(COVER_FILTER) -race -covermode atomic -coverprofile=coverage.out $(COVER_PKG)/...
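# COVER_PKG narrows both the packages under test and the coverage scope, e.g.:
#   make test-all COVER_PKG=./cns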
test-integration: ## run all integration tests.
AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) \
CNI_VERSION=$(CNI_VERSION) \
CNS_VERSION=$(CNS_VERSION) \
go test -mod=readonly -buildvcs=false -timeout 1h -coverpkg=./... -race -covermode atomic -coverprofile=coverage.out -tags=integration --skip 'TestE2E*' ./test/integration...
test-load: ## run all load tests
AZURE_IPAM_VERSION=$(AZURE_IPAM_VERSION) \
CNI_VERSION=$(CNI_VERSION) \
CNS_VERSION=$(CNS_VERSION) \
go test -timeout 30m -race -tags=load ./test/integration/load... -v
test-validate-state:
cd test/integration/load && go test -mod=readonly -count=1 -timeout 30m -tags load --skip 'TestE2E*' -run ^TestValidateState
cd ../../..
test-cyclonus: ## run the cyclonus test for npm.
cd test/cyclonus && bash ./test-cyclonus.sh
cd ..
test-cyclonus-windows: ## run the cyclonus test for npm.
cd test/cyclonus && bash ./test-cyclonus.sh windows
cd ..
test-extended-cyclonus: ## run the cyclonus test for npm.
cd test/cyclonus && bash ./test-cyclonus.sh extended
cd ..
test-azure-ipam: ## run the unit test for azure-ipam
cd $(AZURE_IPAM_DIR) && go test
kind:
kind create cluster --config ./test/kind/kind.yaml
test-k8se2e: test-k8se2e-build test-k8se2e-only ## Alias to run build and test
test-k8se2e-build: ## Build k8s e2e test suite
cd hack/scripts && bash ./k8se2e.sh $(GROUP) $(CLUSTER)
cd ../..
test-k8se2e-only: ## Run k8s network conformance test, use TYPE=basic for only datapath tests
cd hack/scripts && bash ./k8se2e-tests.sh $(OS) $(TYPE)
cd ../..
##@ Utilities
$(REPO_ROOT)/.git/hooks/pre-push:
@ln -s $(REPO_ROOT)/.hooks/pre-push $(REPO_ROOT)/.git/hooks/
@echo installed pre-push hook
install-hooks: $(REPO_ROOT)/.git/hooks/pre-push ## installs git hooks
gitconfig: ## configure the local git repository
@git config commit.gpgsign true
@git config pull.rebase true
@git config fetch.prune true
@git config core.fsmonitor true
@git config core.untrackedcache true
setup: tools install-hooks gitconfig ## performs common required repo setup
##@ Tools
$(TOOLS_DIR)/go.mod:
cd $(TOOLS_DIR); go mod init && go mod tidy
$(CONTROLLER_GEN): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/controller-gen sigs.k8s.io/controller-tools/cmd/controller-gen
controller-gen: $(CONTROLLER_GEN) ## Build controller-gen
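# All tools follow this pattern: built from the pinned module in build/tools
# into $(TOOLS_BIN_DIR), so CI and local runs use identical tool versions
# rather than whatever happens to be on PATH.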
protoc:
source ${REPO_ROOT}/scripts/install-protoc.sh
$(GOCOV): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/gocov github.com/axw/gocov/gocov
gocov: $(GOCOV) ## Build gocov
$(GOCOV_XML): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/gocov-xml github.com/AlekSi/gocov-xml
gocov-xml: $(GOCOV_XML) ## Build gocov-xml
$(GOFUMPT): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/gofumpt mvdan.cc/gofumpt
gofumpt: $(GOFUMPT) ## Build gofumpt
$(GOLANGCI_LINT): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Build golangci-lint
$(GO_JUNIT_REPORT): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/go-junit-report github.com/jstemmer/go-junit-report
go-junit-report: $(GO_JUNIT_REPORT) ## Build go-junit-report
$(MOCKGEN): $(TOOLS_DIR)/go.mod
cd $(TOOLS_DIR); go mod download; go build -o bin/mockgen github.com/golang/mock/mockgen
mockgen: $(MOCKGEN) ## Build mockgen
clean-tools:
rm -r build/tools/bin
tools: acncli gocov gocov-xml go-junit-report golangci-lint gofumpt protoc ## Build bins for build tools
##@ Help
help: ## Display this help
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[0-9a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
proto-gen: ## Generates source code from protobuf files
protoc --go_out=. --go-grpc_out=. cns/grpc/proto/server.proto
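# Note: --go_out and --go-grpc_out require the protoc-gen-go and
# protoc-gen-go-grpc plugins on PATH; the protoc target's install script is
# expected to provide protoc itself.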