[AIRFLOW-5223] Use kind for Kubernetes in CI (#5837)
This PR reimplements Kubernetes integration testing using kind, a tool for running local Kubernetes clusters with Docker container "nodes". The "nodes" are deployed to a separate Docker daemon (dind) started through docker-compose.
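A condensed sketch of the new test flow, assembled from the scripts changed below; the `docker` hostname, ports 2375/19090/30809 and the kind config path come from the compose files and `kind-cluster-conf.yaml` in this diff, and the image name is the build script's default, so treat this as illustrative rather than a script added by this PR:

```
#!/usr/bin/env bash
# Sketch only: condensed from the CI scripts touched in this commit.
set -euo pipefail

# kind and kubectl talk to the dind sidecar defined in docker-compose.
export DOCKER_HOST=tcp://docker:2375

# Recreate the cluster from the checked-in config (API server published on 19090).
kind delete cluster || true
kind create cluster --config scripts/ci/kubernetes/kind-cluster-conf.yaml
mv "$(kind get kubeconfig-path)" ~/.kube/config
kubectl config set clusters.kind.server https://docker:19090
kubectl cluster-info

# Load the locally built Airflow image into the kind nodes; tests then reach
# the webserver through docker:30809.
kind load docker-image airflow:latest
```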
Parent: cb0dbe309b
Commit: e62056b225
@@ -83,7 +83,7 @@ repos:
- --fuzzy-match-generates-todo
- id: insert-license
name: Add licence for all JINJA template files
files: ^airflow/www/templates/.*\.html$|^docs/templates/.*\.html$|^airflow/contrib/plugins/metastore_browser/templates/.*\.html$ # yamllint disable-line rule:line-length
files: ^airflow/www/templates/.*\.html$|^docs/templates/.*\.html$|^airflow/contrib/plugins/metastore_browser/templates/.*\.html$ # yamllint disable-line rule:line-length
exclude: ^\.github/.*$|^airflow/_vendor/.*$
args:
- --comment-style
@@ -44,11 +44,11 @@ jobs:
python: "3.7"
stage: test
- name: "Tests postgres kubernetes python 3.6 (persistent)"
env: BACKEND=postgres ENV=kubernetes KUBERNETES_VERSION=v1.13.0 KUBERNETES_MODE=persistent_mode
env: BACKEND=postgres ENV=kubernetes KUBERNETES_VERSION=v1.15.0 KUBERNETES_MODE=persistent_mode
python: "3.6"
stage: test
- name: "Tests postgres kubernetes python 3.6 (git)"
env: BACKEND=postgres ENV=kubernetes KUBERNETES_VERSION=v1.13.0 KUBERNETES_MODE=git_mode
env: BACKEND=postgres ENV=kubernetes KUBERNETES_VERSION=v1.15.0 KUBERNETES_MODE=git_mode
python: "3.6"
stage: test
- name: "Static checks (no pylint, no licence check)"
Dockerfile
@@ -56,7 +56,6 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

# Install basic apt dependencies
RUN curl -sL https://deb.nodesource.com/setup_10.x | bash - \
&& apt-get update \
@@ -135,6 +134,10 @@ ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

ARG APT_DEPS_IMAGE="airflow-apt-deps-ci-slim"
ENV APT_DEPS_IMAGE=${APT_DEPS_IMAGE}
ARG KUBERNETES_VERSION="v1.15.0"
ENV KUBERNETES_VERSION=${KUBERNETES_VERSION}
ARG KIND_VERSION="v0.5.0"
ENV KIND_VERSION=${KIND_VERSION}

RUN echo "${APT_DEPS_IMAGE}"
@@ -148,6 +151,9 @@ RUN if [[ "${APT_DEPS_IMAGE}" == "airflow-apt-deps-ci" ]]; then \
&& apt-get update \
&& apt-get install --no-install-recommends -y \
gnupg \
apt-transport-https \
ca-certificates \
software-properties-common \
krb5-user \
ldap-utils \
less \
@@ -171,6 +177,44 @@ RUN if [[ "${APT_DEPS_IMAGE}" == "airflow-apt-deps-ci" ]]; then \
# TODO: We should think about removing those and moving them into docker-compose dependencies.
COPY scripts/ci/docker_build/ci_build_install_deps.sh /tmp/ci_build_install_deps.sh

# Kubernetes dependencies
RUN \
if [[ "${APT_DEPS_IMAGE}" == "airflow-apt-deps-ci" ]]; then \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \
&& add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian stretch stable" \
&& apt-get update \
&& apt-get -y install --no-install-recommends docker-ce \
&& apt-get autoremove -yqq --purge \
&& apt-get clean && rm -rf /var/lib/apt/lists/* \
;\
fi

RUN \
if [[ "${APT_DEPS_IMAGE}" == "airflow-apt-deps-ci" ]]; then \
curl -Lo kubectl \
"https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl" \
&& chmod +x kubectl \
&& mv kubectl /usr/local/bin/kubectl \
;\
fi

RUN \
if [[ "${APT_DEPS_IMAGE}" == "airflow-apt-deps-ci" ]]; then \
curl -Lo kind \
"https://github.com/kubernetes-sigs/kind/releases/download/${KIND_VERSION}/kind-linux-amd64" \
&& chmod +x kind \
&& mv kind /usr/local/bin/kind \
;\
fi

ENV HADOOP_DISTRO=cdh \
HADOOP_MAJOR=5 \
HADOOP_DISTRO_VERSION=5.11.0 \
HADOOP_VERSION=2.6.0 \
HIVE_VERSION=1.1.0
ENV HADOOP_URL=https://archive.cloudera.com/${HADOOP_DISTRO}${HADOOP_MAJOR}/${HADOOP_DISTRO}/${HADOOP_MAJOR}/
ENV HADOOP_HOME=/tmp/hadoop-cdh HIVE_HOME=/tmp/hive

RUN if [[ "${APT_DEPS_IMAGE}" == "airflow-apt-deps-ci" ]]; then /tmp/ci_build_install_deps.sh; fi

ENV PATH "${PATH}:/tmp/hive/bin"
@@ -30,12 +30,6 @@ script_start
build_image_on_ci

KUBERNETES_VERSION=${KUBERNETES_VERSION:=""}
# Required for K8s v1.10.x. See
# https://github.com/kubernetes/kubernetes/issues/61058#issuecomment-372764783
if [[ "${KUBERNETES_VERSION}" == "" ]]; then
sudo mount --make-shared /
sudo service docker restart
fi

sudo pip install pre-commit
@@ -74,18 +74,11 @@ elif [[ "${ENV}" == "kubernetes" ]]; then
echo
echo "Running kubernetes tests in ${KUBERNETES_MODE}"
echo
"${MY_DIR}/kubernetes/minikube/stop_minikube.sh"
"${MY_DIR}/kubernetes/setup_kubernetes.sh"
"${MY_DIR}/kubernetes/kube/deploy.sh" -d "${KUBERNETES_MODE}"
MINIKUBE_IP=$(minikube ip)
export MINIKUBE_IP
docker-compose --log-level ERROR \
-f "${MY_DIR}/docker-compose.yml" \
-f "${MY_DIR}/docker-compose-${BACKEND}.yml" \
-f "${MY_DIR}/docker-compose-kubernetes.yml" \
"${DOCKER_COMPOSE_LOCAL[@]}" \
run --no-deps airflow-testing /opt/airflow/scripts/ci/in_container/entrypoint_ci.sh;
"${MY_DIR}/kubernetes/minikube/stop_minikube.sh"
run airflow-testing /opt/airflow/scripts/ci/in_container/entrypoint_ci.sh;
echo
echo "Finished Running kubernetes tests in ${KUBERNETES_MODE}"
echo
@@ -24,6 +24,7 @@ services:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=airflow
- POSTGRES_DB=airflow

mysql:
image: mysql:5.6
environment:

@@ -54,11 +55,19 @@ services:
- LDAP_CONFIG_PASSWORD=insecure
volumes:
- ./openldap/ldif:/container/service/slapd/assets/config/bootstrap/ldif/custom:ro

krb5-kdc-server:
image: godatadriven/krb5-kdc-server
hostname: krb5-kdc-server
domainname: example.com

docker:
image: docker:19.03.2-dind
privileged: true
environment:
- DOCKER_TLS_CERTDIR=
- DOCKER_TLS_VERIFY=

airflow-testing:
image: ${AIRFLOW_CONTAINER_DOCKER_IMAGE}
init: true

@@ -74,6 +83,7 @@ services:
- KRB5_CONFIG=/etc/krb5.conf
- KRB5_KTNAME=/etc/airflow.keytab
- CELERY_BROKER_URLS=amqp://guest:guest@rabbitmq:5672,redis://redis:6379/0
- DOCKER_HOST=tcp://docker:2375
- ENV
- BACKEND
- CI

@@ -91,6 +101,9 @@ services:
- RUN_TESTS
- AIRFLOW_CI_VERBOSE
- AIRFLOW_CI_SILENT
- AIRFLOW_CONTAINER_DOCKER_IMAGE
- KUBERNETES_VERSION
- KUBERNETES_MODE
depends_on:
- postgres
- mysql

@@ -100,3 +113,4 @@ services:
- redis
- openldap
- krb5-kdc-server
- docker
@@ -19,6 +19,7 @@
#
# Bash sanity settings (error on exit, complain for undefined vars, error when pipe fails)
set -euo pipefail

MY_DIR=$(cd "$(dirname "$0")" || exit 1; pwd)

if [[ ${AIRFLOW_CI_VERBOSE:="false"} == "true" ]]; then

@@ -132,11 +133,6 @@ if [[ ! -h /home/travis/build/apache/airflow ]]; then
sudo ln -s "${AIRFLOW_SOURCES}" /home/travis/build/apache/airflow
fi

# Fix file permissions
if [[ -d "${HOME}/.minikube" ]]; then
sudo chown -R "${AIRFLOW_USER}.${AIRFLOW_USER}" "${HOME}/.kube" "${HOME}/.minikube"
fi

# Cleanup the logs, tmp when entering the environment
sudo rm -rf "${AIRFLOW_SOURCES}"/logs/*
sudo rm -rf "${AIRFLOW_SOURCES}"/tmp/*
@@ -255,19 +251,14 @@ if [[ -z "${KUBERNETES_VERSION}" ]]; then
echo
"${MY_DIR}/run_ci_tests.sh" "${ARGS[@]}"
else
export KUBERNETES_VERSION
export MINIKUBE_IP
# This script runs inside a container, the path of the kubernetes certificate
# is /home/travis/.minikube/client.crt but the user in the container is `root`
# TODO: Check this. This should be made travis-independent :D
if [[ ! -d /home/travis ]]; then
sudo mkdir -p /home/travis
fi
sudo ln -s /root/.minikube /home/travis/.minikube
echo "Set up Kubernetes cluster for tests"
"${MY_DIR}/../kubernetes/setup_kubernetes.sh"
"${MY_DIR}/../kubernetes/app/deploy_app.sh" -d "${KUBERNETES_MODE}"

echo
echo "Running CI tests with ${ARGS[*]}"
echo
"${MY_DIR}/run_ci_tests.sh" tests.minikube "${ARGS[@]}"
"${MY_DIR}/run_ci_tests.sh" tests.integration.kubernetes "${ARGS[@]}"
fi

in_container_script_end
@@ -1,42 +0,0 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
# Airflow on Kubernetes

If you don't have minikube installed, please run `./minikube/start_minikube.sh`
to start a k8s-instance on your local machine. Make sure that your `kubectl` is
pointing to the local k8s instance.

First build the docker images by running `./docker/build.sh`. This will build
the image and push it to the local registry.
Secondly, deploy Apache Airflow using `./kube/deploy.sh -d persistent_mode` or
`./kube/deploy.sh -d git_mode`.
Finally, open the Airflow webserver
page by browsing to `http://192.168.99.100:30809/admin/` (on OSX).

When kicking of a new job, you should be able to see new pods being kicked off:

```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
airflow-6cf894578b-rkcpm 2/2 Running 0 36m
examplehttpoperatorhttpsensorcheck-490dc90941984812b934fceedf07ca81 1/1 Running 0 7s
examplehttpoperatorhttpsensorcheck-ea787dd2163243a78dfba96d81f47e0d 1/1 Running 0 9s
examplehttpoperatorpostop-3637d44e1b8a42789c59d2c6a66bec6a 0/1 ContainerCreating 0 0s
postgres-airflow-b4844754f-b8d8k 1/1 Running 0 36m
```
@@ -150,12 +150,6 @@ ${SED_COMMAND} -i "s|{{CONFIGMAP_DAGS_VOLUME_CLAIM}}|${CONFIGMAP_DAGS_VOLUME_CLA
cat "${BUILD_DIRNAME}/airflow.yaml"
cat "${BUILD_DIRNAME}/configmaps.yaml"

# Fix file permissions
# TODO: Check this - this should be TRAVIS-independent
if [[ "${TRAVIS}" == true ]]; then
sudo chown -R travis.travis "$HOME/.kube" "$HOME/.minikube"
fi

kubectl delete -f "${DIRNAME}/postgres.yaml"
kubectl delete -f "${BUILD_DIRNAME}/airflow.yaml"
kubectl delete -f "${DIRNAME}/secrets.yaml"

@@ -212,13 +206,13 @@ else
fi

# Wait until Airflow webserver is up
MINIKUBE_IP=$(minikube ip)
KUBERNETES_HOST=docker
AIRFLOW_WEBSERVER_IS_READY="0"
CONSECUTIVE_SUCCESS_CALLS=0
for i in {1..30}
do
echo "------- Wait until webserver is up: $i -------"
HTTP_CODE=$(curl -LI "http://${MINIKUBE_IP}:30809/health" -o /dev/null -w '%{http_code}\n' -sS) || true
HTTP_CODE=$(curl -LI "http://${KUBERNETES_HOST}:30809/health" -o /dev/null -w '%{http_code}\n' -sS) || true
if [[ "${HTTP_CODE}" == 200 ]]; then
(( CONSECUTIVE_SUCCESS_CALLS+=1 ))
else
@@ -63,7 +63,7 @@ spec:
- "bash"
args:
- "-cx"
- "./tmp/airflow-test-env-init.sh"
- "/tmp/airflow-test-env-init.sh"
{{INIT_GIT_SYNC}}
containers:
- name: webserver
@@ -14,36 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
FROM python:3.6-slim

# install deps
RUN apt-get update -y && apt-get install -y \
wget \
libczmq-dev \
curl \
libssl-dev \
git \
libpq-dev \
inetutils-telnet \
bind9utils \
zip \
unzip \
gcc \
&& apt-get clean

RUN pip install --upgrade pip

# Since we install vanilla Airflow, we also want to have support for Postgres and Kubernetes
RUN pip install -U setuptools && \
pip install kubernetes && \
pip install cryptography && \
pip install psycopg2-binary==2.7.4 # I had issues with older versions of psycopg2, just a warning

# install airflow
COPY requirements.txt /tmp/requirements.txt
RUN pip install -r /tmp/requirements.txt
COPY airflow.tar.gz /tmp/airflow.tar.gz
RUN pip install --no-use-pep517 /tmp/airflow.tar.gz
ARG AIRFLOW_CI_IMAGE
FROM ${AIRFLOW_CI_IMAGE}

COPY airflow-test-env-init.sh /tmp/airflow-test-env-init.sh
@@ -18,7 +18,7 @@

set -x

cd /usr/local/lib/python3.6/site-packages/airflow && \
cd /opt/airflow/airflow && \
cp -R example_dags/* /root/airflow/dags/ && \
cp -R contrib/example_dags/example_kubernetes_*.py /root/airflow/dags/ && \
cp -a contrib/example_dags/libs /root/airflow/dags/ && \
@@ -18,38 +18,14 @@

IMAGE=${IMAGE:-airflow}
TAG=${TAG:-latest}
DIRNAME=$(cd "$(dirname "$0")" || exit 1; pwd)
AIRFLOW_SOURCES=$(cd "${DIRNAME}/../../../.." || exit 1; pwd)
PYTHON_DOCKER_IMAGE=python:3.6-slim
DIRNAME=$(cd "$(dirname "$0")" && pwd)
AIRFLOW_ROOT="${DIRNAME}/../../../.."

set -e

# Don't rebuild the image more than once on travis
if [[ -n "${TRAVIS}" || -z "${AIRFLOW_CI_REUSE_K8S_IMAGE}" ]] && \
docker image inspect "${IMAGE}:${TAG}" > /dev/null 2>/dev/null; then
echo "Re-using existing image"
exit 0
fi

if [[ "${VM_DRIVER:-none}" != "none" ]]; then
if ENVCONFIG=$(minikube docker-env); then
eval "${ENVCONFIG}"
fi
fi

echo "Airflow directory ${AIRFLOW_SOURCES}"
echo "Airflow directory ${AIRFLOW_ROOT}"
echo "Airflow Docker directory ${DIRNAME}"

cd "${AIRFLOW_SOURCES}"
docker run -ti --rm -v "${AIRFLOW_SOURCES}:/airflow" \
-w /airflow "${PYTHON_DOCKER_IMAGE}" ./scripts/ci/kubernetes/docker/compile.sh
cd "${DIRNAME}" && docker build --build-arg AIRFLOW_CI_IMAGE="${AIRFLOW_CONTAINER_DOCKER_IMAGE}" --pull "${DIRNAME}" --tag="${IMAGE}:${TAG}"

pip freeze | grep -v airflow | grep -v mysql> "${DIRNAME}/requirements.txt"

sudo rm -rf "${AIRFLOW_SOURCES}/airflow/www/node_modules"
sudo rm -rf "${AIRFLOW_SOURCES}/airflow/www_rbac/node_modules"

echo "Copy distro ${AIRFLOW_SOURCES}/dist/*.tar.gz ${DIRNAME}/airflow.tar.gz"
cp "${AIRFLOW_SOURCES}"/dist/*.tar.gz "${DIRNAME}/airflow.tar.gz"
cd "${DIRNAME}" && docker build --pull "${DIRNAME}" --tag="${IMAGE}:${TAG}"
rm "${DIRNAME}/airflow.tar.gz"
kind load docker-image "${IMAGE}:${TAG}"
@@ -15,16 +15,22 @@
# specific language governing permissions and limitations
# under the License.
---
version: "2.2"
services:
airflow-testing:
network_mode: host
environment:
- KUBERNETES_VERSION
- KUBERNETES_MODE
- MINIKUBE_IP
volumes:
- /usr/local/bin/kubectl:/usr/local/bin/kubectl
- /usr/local/bin/minikube:/usr/local/bin/minikube
- ~/.kube:/root/.kube
- ~/.minikube:/root/.minikube
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
networking:
apiServerAddress: 0.0.0.0
apiServerPort: 19090
nodes:
- role: control-plane
- role: worker
extraPortMappings:
- containerPort: 30809
hostPort: 30809
kubeadmConfigPatchesJson6902:
- group: kubeadm.k8s.io
version: v1beta2
kind: ClusterConfiguration
patch: |
- op: add
path: /apiServer/certSANs/-
value: docker
@@ -1,66 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This script was based on one made by @kimoonkim for kubernetes-hdfs
# Helper bash functions.
# Wait for Kubernetes resources to be up and ready.
function _wait_for_ready () {
local COUNT="$1"
shift
local EVIDENCE="$1"
shift
local ATTEMPTS=40
echo "Waiting till ready (count: ${COUNT}): $*"
while [[ "${COUNT}" < $("$@" 2>&1 | tail -n +2 | awk '{print $2}' | grep -c "${EVIDENCE}") ]];
do
if [[ "${ATTEMPTS}" = "1" ]]; then
echo "Last run: $*"
"$@" || true
local command="$*"
command="${command/get/describe}"
${command} || true
fi
(( ATTEMPTS-- )) || return 1
sleep 5
done
"$@" || true
}

# Wait for all expected number of nodes to be ready
function k8s_all_nodes_ready () {
local count="$1"
shift
_wait_for_ready "$count" "-v NotReady" kubectl get nodes
_wait_for_ready "$count" Ready kubectl get nodes
}

function k8s_single_node_ready () {
k8s_all_nodes_ready 1
}

# Wait for at least expected number of pods to be ready.
function k8s_at_least_n_pods_ready () {
local COUNT="$1"
shift
local EVIDENCE="-E '([0-9])\/(\1)'"
_wait_for_ready "${COUNT}" "{EVIDENCE}" kubectl get pods "$@"
}

function k8s_single_pod_ready () {
k8s_at_least_n_pods_ready 1 "$@"
}
@@ -1,132 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This script was based on one made by @kimoonkim for kubernetes-hdfs
set -ex

_MY_SCRIPT="${BASH_SOURCE[0]}"
_MY_DIR=$(cd "$(dirname "$_MY_SCRIPT")" && pwd)
# Avoids 1.7.x because of https://github.com/kubernetes/minikube/issues/2240
_KUBERNETES_VERSION=${KUBERNETES_VERSION:=""}
_MINIKUBE_VERSION="${MINIKUBE_VERSION:-v0.34.1}"

echo "setting up kubernetes ${_KUBERNETES_VERSION}, using minikube ${_MINIKUBE_VERSION}"

_VM_DRIVER="${VM_DRIVER:-none}"
USE_MINIKUBE_DRIVER_NONE=true

_UNAME_OUT=$(uname -s)
case "${_UNAME_OUT}" in
Linux*) _MY_OS=linux;;
Darwin*) _MY_OS=darwin;;
*) echo "${_UNAME_OUT} is unsupported."
exit 1;;
esac
echo "Local OS is ${_MY_OS}"

export MINIKUBE_WANTREPORTERRORPROMPT=false
export CHANGE_MINIKUBE_NONE_USER=true

cd "${_MY_DIR}"

# shellcheck source=scripts/ci/kubernetes/minikube/_k8s.sh
source "_k8s.sh"

rm -rf tmp
mkdir -p bin tmp

if [[ ! -d /usr/local/bin ]]; then
sudo mkdir -p /usr/local/bin
fi

if [[ ! -x /usr/local/bin/kubectl ]]; then
echo Downloading kubectl, which is a requirement for using minikube.
curl -Lo bin/kubectl \
https://storage.googleapis.com/kubernetes-release/release/${_KUBERNETES_VERSION}/bin/${_MY_OS}/amd64/kubectl
chmod +x bin/kubectl
sudo mv bin/kubectl /usr/local/bin/kubectl
fi
if [[ ! -x /usr/local/bin/minikube ]]; then
echo Downloading minikube.
curl -Lo bin/minikube \
"https://storage.googleapis.com/minikube/releases/${_MINIKUBE_VERSION}/minikube-${_MY_OS}-amd64"
chmod +x bin/minikube
sudo mv bin/minikube /usr/local/bin/minikube
fi

export PATH="${_MY_DIR}/bin:$PATH"

if [[ "${USE_MINIKUBE_DRIVER_NONE:-}" = "true" ]]; then
# Run minikube with none driver.
# See https://blog.travis-ci.com/2017-10-26-running-kubernetes-on-travis-ci-with-minikube
_VM_DRIVER=none
if [[ ! -x /usr/local/bin/nsenter ]]; then
# From https://engineering.bitnami.com/articles/implementing-kubernetes-integration-tests-in-travis.html
# Travis ubuntu trusty env doesn't have nsenter, needed for --vm-driver=none
echo "INFO: Building 'nsenter' ..."
cat <<-EOF | docker run -i --rm -v "$(pwd):/build" ubuntu:14.04 >& nsenter.build.log
apt-get update
apt-get install -qy git bison build-essential autopoint libtool automake autoconf gettext pkg-config
git clone --depth 1 git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git /tmp/util-linux
cd /tmp/util-linux
./autogen.sh
./configure --without-python --disable-all-programs --enable-nsenter
make nsenter
cp -pfv nsenter /build
EOF
if [[ ! -f ./nsenter ]]; then
echo "ERROR: nsenter build failed, log:"
cat nsenter.build.log
exit 1
fi
echo "INFO: nsenter build OK"
sudo mv ./nsenter /usr/local/bin
fi
fi

echo "your path is ${PATH}"

_MINIKUBE="sudo -E PATH=$PATH minikube"

${_MINIKUBE} config set WantUpdateNotification false
${_MINIKUBE} start --kubernetes-version=${_KUBERNETES_VERSION} --vm-driver=${_VM_DRIVER}
${_MINIKUBE} update-context
# TODO: Check This - it should be travis-independent
if [[ "${TRAVIS}" == true ]]; then
sudo chown -R travis.travis "${HOME}/.kube" "${HOME}/.minikube"
fi

# Wait for Kubernetes to be up and ready.
k8s_single_node_ready

echo Minikube addons:
${_MINIKUBE} addons list
kubectl get storageclass
echo Showing kube-system pods
kubectl get -n kube-system pods

(k8s_single_pod_ready -n kube-system -l component=kube-addon-manager) ||
(_ADDON=$(kubectl get pod -n kube-system -l component=kube-addon-manager \
--no-headers -o name| cut -d/ -f2);
echo Addon-manager describe:;
kubectl describe pod -n kube-system "${_ADDON}";
echo Addon-manager log:;
kubectl logs -n kube-system "${_ADDON}";
exit 1)
k8s_single_pod_ready -n kube-system -l k8s-app=kube-dns
k8s_single_pod_ready -n kube-system storage-provisioner
@@ -1,42 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This script was based on one made by @kimoonkim for kubernetes-hdfs
set -ex

if [[ ! -x /usr/local/bin/minikube ]]; then
exit 0
fi

# Fix file permissions
# TODO: Change this - this should be Travis independent
if [[ "${TRAVIS}" == true ]]; then
sudo chown -R travis.travis "${HOME}/.kube" "${HOME}/.minikube" 2>/dev/null || true
fi
set +e

if sudo minikube status; then
sudo minikube delete
sudo rm -rf "${HOME}/.kube" "${HOME}/.minikube"
if [[ "${TRAVIS}" == true ]]; then
sudo rm -rf /etc/kubernetes/*.conf
fi
fi
set -e

sudo chown -R travis.travis . || true
@@ -17,21 +17,21 @@
# under the License.

set -euo pipefail

echo "This script downloads minikube, starts a driver=None minikube cluster, builds the airflow source\
and docker image, and then deploys airflow onto kubernetes"
echo "For development, start minikube yourself (ie: minikube start) then run this script as you probably\
do not want a driver=None minikube cluster"

DIRNAME=$(cd "$(dirname "$0")" && pwd)

# Fix file permissions
# TODO: change this - it should be Travis independent
if [[ "${TRAVIS:=}" == true ]]; then
sudo chown -R travis.travis .
fi
# start kubernetes
kind delete cluster || true
kind create cluster --config "${DIRNAME}/kind-cluster-conf.yaml"
mv "$(kind get kubeconfig-path)" ~/.kube/config
kubectl config set clusters.kind.server https://docker:19090
kubectl cluster-info

kubectl get nodes
echo "Showing storageClass"
kubectl get storageclass
echo "Showing kube-system pods"
kubectl get -n kube-system pods

"${DIRNAME}/minikube/start_minikube.sh"
"${DIRNAME}/docker/build.sh"

echo "Airflow environment on kubernetes is good to go!"
@@ -45,7 +45,6 @@ export HOST_GROUP_ID

docker-compose \
-f "${MY_DIR}/docker-compose.yml" \
-f "${MY_DIR}/docker-compose-kubernetes.yml" \
-f "${MY_DIR}/docker-compose-local.yml" \
-f "${MY_DIR}/docker-compose-mysql.yml" \
-f "${MY_DIR}/docker-compose-postgres.yml" \
scripts/ci/kubernetes/docker/compile.sh → tests/integration/kubernetes/__init__.py
Executable file → Normal file
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information

@@ -15,17 +16,3 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

set -e

export DEBIAN_FRONTEND=noninteractive

apt-get update
apt-get install -y --no-install-recommends curl gnupg2

curl -sL https://deb.nodesource.com/setup_8.x | bash -

apt-get update
apt-get install -y --no-install-recommends git nodejs
pip install GitPython
python setup.py compile_assets sdist -q
@@ -34,19 +34,11 @@ except Exception as e: # pylint: disable=broad-except
raise e
else:
raise unittest.SkipTest(
"Kubernetes integration tests require a minikube cluster;"
"Kubernetes integration tests require a kubernetes cluster;"
"Skipping tests {}".format(e)
)


def get_minikube_host():
if "MINIKUBE_IP" in os.environ:
host_ip = os.environ['MINIKUBE_IP']
else:
host_ip = check_output(['/usr/local/bin/minikube', 'ip']).decode('UTF-8')

host = '{}:30809'.format(host_ip.strip())
return host
KUBERNETES_HOST = 'docker:30809'


class TestKubernetesExecutor(unittest.TestCase):

@@ -67,7 +59,7 @@ class TestKubernetesExecutor(unittest.TestCase):

def _ensure_airflow_webserver_is_healthy(self):
response = self.session.get(
"http://{host}/health".format(host=get_minikube_host()),
"http://{host}/health".format(host=KUBERNETES_HOST),
timeout=1,
)

@@ -188,7 +180,7 @@ class TestKubernetesExecutor(unittest.TestCase):
return result_json

def test_integration_run_dag(self):
host = get_minikube_host()
host = KUBERNETES_HOST
dag_id = 'example_kubernetes_executor_config'

result_json = self.start_dag(dag_id=dag_id, host=host)

@@ -211,7 +203,7 @@ class TestKubernetesExecutor(unittest.TestCase):
expected_final_state='success', timeout=100)

def test_integration_run_dag_with_scheduler_failure(self):
host = get_minikube_host()
host = KUBERNETES_HOST
dag_id = 'example_kubernetes_executor_config'

result_json = self.start_dag(dag_id=dag_id, host=host)
@@ -43,7 +43,7 @@ except Exception as e: # pylint: disable=broad-except
raise e
else:
raise unittest.SkipTest(
"Kubernetes integration tests require a minikube cluster;"
"Kubernetes integration tests require a kubernetes cluster;"
"Skipping tests {}".format(e)
)