From afc46f14711631d22c49dffe023e02fd774acd25 Mon Sep 17 00:00:00 2001
From: Victoria Chan
Date: Thu, 8 Jun 2023 18:16:05 +0100
Subject: [PATCH] Moved to a single multi-stage Dockerfile, and moved some
 commands from tasks.py to the Dockerfile. Frontend watching is not yet
 sorted out.

---
 Dockerfile         | 126 +++++++++++++++++++++++++++++++++++++++++++++
 docker-compose.yml |  70 ++++++++++++++-----------
 tasks.py           |  20 -------
 3 files changed, 167 insertions(+), 49 deletions(-)
 create mode 100644 Dockerfile

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..353b4e10c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,126 @@
+# (Keep the version in sync with the node install below)
+FROM node:18-bullseye-slim as frontend
+
+# Make build & post-install scripts behave as if we were in a CI environment (e.g. for logging verbosity purposes).
+ARG CI=true
+
+WORKDIR /app
+
+# Install front-end dependencies.
+# This will create a `node_modules` directory in the current directory.
+COPY package.json package-lock.json tailwind.config.js esbuild.config.js contribute.json ./
+COPY ./tailwind-plugins/ ./tailwind-plugins/
+RUN npm ci --no-optional --no-audit --progress=false
+
+# Compile static files from the sources in ./source into ./network-api/networkapi/frontend.
+# This will create a `network-api/networkapi/frontend` directory.
+COPY ./source/ ./source/
+COPY ./network-api/networkapi/ ./network-api/networkapi/
+RUN npm run build
+
+
+# We use Debian images because they are considered more stable than the Alpine
+# ones, which use musl as their C library instead of glibc. Debian images also
+# come with the packages required for image manipulation out of the box. They
+# do, however, weigh a lot: up to approx. 1.5GiB per built image.
+#
+# Note: this stage is not currently used in production; it serves as the base for the dev build below.
+FROM python:3.9.9-slim as production
+
+# Install dependencies in a virtualenv
+ENV VIRTUAL_ENV=/app/dockerpythonvenv
+
+RUN useradd mozilla --create-home && mkdir /app $VIRTUAL_ENV && chown -R mozilla /app $VIRTUAL_ENV
+
+WORKDIR /app
+
+# Set default environment variables. They are used at build time and runtime.
+# If you specify your own environment variables on Heroku or Dokku, they will
+# override the ones set here. The ones below serve as sane defaults only.
+# * PATH - Make sure the virtualenv's bin directory is on the PATH.
+# * PYTHONUNBUFFERED - Ensures Python output is sent straight to the terminal
+#   instead of being buffered.
+#   https://docs.python.org/3.11/using/cmdline.html#envvar-PYTHONUNBUFFERED
+#   https://docs.python.org/3.11/using/cmdline.html#cmdoption-u
+# * DJANGO_SETTINGS_MODULE - default settings used in the container.
+# * PORT - default port used. Please match with EXPOSE so it works on Dokku.
+#   Heroku will ignore EXPOSE and only set the PORT variable, which is
+#   read/used by Gunicorn.
+# * WEB_CONCURRENCY - number of workers used by Gunicorn. The variable is
+#   read by Gunicorn.
+# * GUNICORN_CMD_ARGS - additional arguments to be passed to Gunicorn. This
+#   variable is also read by Gunicorn.
+ENV PATH=$VIRTUAL_ENV/bin:$PATH \
+    PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    DJANGO_SETTINGS_MODULE=networkapi.settings \
+    PORT=8000 \
+    WEB_CONCURRENCY=3 \
+    GUNICORN_CMD_ARGS="-c gunicorn-conf.py --max-requests 1200 --max-requests-jitter 50 --access-logfile - --timeout 25"
+
+# Make $BUILD_ENV available at runtime
+ARG BUILD_ENV
+ENV BUILD_ENV=${BUILD_ENV}
+
+# Port exposed by this container. It should default to the port used by your WSGI
+# server (Gunicorn). This is read by Dokku only; Heroku ignores EXPOSE.
+EXPOSE 8000
+
+# Install operating system dependencies.
+RUN apt-get update --yes --quiet && apt-get install --yes --quiet --no-install-recommends \
+    build-essential \
+    libpq-dev \
+    curl \
+    git \
+    && apt-get autoremove && rm -rf /var/lib/apt/lists/*
+
+# Don't use the root user as it's an anti-pattern and Heroku does not run
+# containers as root either.
+# https://devcenter.heroku.com/articles/container-registry-and-runtime#dockerfile-commands-and-runtime
+USER mozilla
+
+# Install your app's Python requirements.
+RUN python -m venv $VIRTUAL_ENV
+COPY --chown=mozilla ./requirements.txt ./dev-requirements.txt ./
+RUN pip install -U pip==20.0.2 && pip install pip-tools
+RUN pip install -r requirements.txt
+
+# Copy application code.
+COPY --chown=mozilla . .
+
+# Copy compiled assets from the frontend build stage for collectstatic to work.
+COPY --chown=mozilla --from=frontend /app/network-api/networkapi/frontend ./network-api/networkapi/frontend
+
+# Collect static files. This command moves static files from the application
+# directories and the "network-api/networkapi/frontend" folder to the main
+# static directory that will be served by the WSGI server.
+RUN SECRET_KEY=none python ./network-api/manage.py collectstatic --noinput --clear
+
+# Run the WSGI server. It reads the GUNICORN_CMD_ARGS, PORT and WEB_CONCURRENCY
+# environment variables, hence we don't need to specify many options below.
+CMD gunicorn networkapi.wsgi:application
+
+# The stage below is used for local dev builds only.
+FROM production as dev
+
+# Swap to root so the following install steps can run.
+USER root
+
+# Install Node.js (keep the version in sync with the node stage above).
+RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - && apt-get install -y nodejs
+
+# Install `psql`, useful for `manage.py dbshell`
+RUN apt-get install -y postgresql-client
+
+# Restore the unprivileged user.
+USER mozilla
+
+# Install dev dependencies
+RUN pip install -r dev-requirements.txt
+
+# Pull in the node modules from the frontend build stage so we don't have to run `npm ci` again.
+# This is only a copy inside the container; it is not visible to the host machine.
+COPY --chown=mozilla --from=frontend /app/node_modules ./node_modules
+
+# Do nothing forever - commands are run in this container via exec from elsewhere.
+CMD tail -f /dev/null
diff --git a/docker-compose.yml b/docker-compose.yml
index 78b6b1944..4fea53c69 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,26 +1,56 @@
 version: '3'
 
 services:
-
-  watch-static-files:
+  backend:
+    platform: linux/amd64 # Fix issues for M1 Macs
     build:
+      target: dev
       context: .
-      dockerfile: ./dockerfiles/Dockerfile.node
-    env_file:
-      - .env
     environment:
       # Need to specify the SHELL env var for chokidar
       - SHELL=/bin/sh
       # Force polling because inotify doesn't work on Docker Windows
       - CHOKIDAR_USEPOLLING=1
       - CHOKIDAR_INTERVAL=2000
-    command: npm run watch
+    env_file:
+      - ".env"
+    command: /app/dockerpythonvenv/bin/python network-api/manage.py runserver 0.0.0.0:8000
+    ports:
+      - "8000:8000"
+      - "8001:8001" # ptvsd port for debugging
     volumes:
-      - .:/app
-      - node_modules:/app/node_modules/
-      - dockerpythonvenv:/app/dockerpythonvenv/
+      # The container already has a copy of the application code from build time, but we also
+      # mount a number of files here so that the container sees changes to them (and can
+      # write to them) during development.
+      - ./.git:/app/.git:rw
+      - ./network-api:/app/network-api:rw
+      - ./.env:/app/.env:rw
+      - ./tests:/app/tests:rw
+      - ./pyproject.toml:/app/pyproject.toml:rw
+      - ./release-steps.sh:/app/release-steps.sh:rw
+      - ./requirements.txt:/app/requirements.txt:rw
+      - ./dev-requirements.txt:/app/dev-requirements.txt:rw
+
+      # Frontend config
+      - ./source:/app/source:rw
+      - ./.editorconfig:/app/.editorconfig:rw
+      - ./.eslintignore:/app/.eslintignore:rw
+      - ./.eslintrc.a11y.json:/app/.eslintrc.a11y.json:rw
+      - ./.eslintrc.json:/app/.eslintrc.json:rw
+      - ./.prettierignore:/app/.prettierignore:rw
+      - ./.prettierrc:/app/.prettierrc:rw
+      - ./.stylelintrc:/app/.stylelintrc:rw
+      - ./.stylelintrc-colors.js:/app/.stylelintrc-colors.js:rw
+      - ./esbuild.config.js:/app/esbuild.config.js:rw
+      - ./pyrightconfig.json:/app/pyrightconfig.json:rw
+      - ./playwright.config.js:/app/playwright.config.js:rw
+      - ./postcss.config.js:/app/postcss.config.js:rw
+      - ./tailwind-plugins:/app/tailwind-plugins:rw
+      - ./tailwind.config.js:/app/tailwind.config.js:rw
+      - ./package-lock.json:/app/package-lock.json:rw
+      - ./package.json:/app/package.json:rw
     depends_on:
-      - backend
+      - postgres
 
   postgres:
     image: postgres:13.2
@@ -33,24 +63,6 @@ services:
       - POSTGRES_HOST_AUTH_METHOD=trust
     volumes:
       - postgres_data:/var/lib/postgresql/data/
-
-  backend:
-    build:
-      context: .
-      dockerfile: ./dockerfiles/Dockerfile.python
-    env_file:
-      - ".env"
-    command: dockerpythonvenv/bin/python network-api/manage.py runserver 0.0.0.0:8000
-    ports:
-      - "8000:8000"
-      - "8001:8001" # ptvsd port for debugging
-    volumes:
-      - .:/app
-      - dockerpythonvenv:/app/dockerpythonvenv/
-    depends_on:
-      - postgres
-
+
 volumes:
   postgres_data:
-  node_modules:
-  dockerpythonvenv:
diff --git a/tasks.py b/tasks.py
index f7925bed1..b3ff4d437 100644
--- a/tasks.py
+++ b/tasks.py
@@ -167,26 +167,6 @@ def setup(ctx):
     ctx.run("docker-compose down --volumes")
     print("* Building Docker images")
     ctx.run("docker-compose build")
-    print("* Creating a Python virtualenv")
-    ctx.run(
-        "docker-compose run --rm backend python -m venv dockerpythonvenv",
-        **PLATFORM_ARG,
-    )
-    print("* Install Node dependencies")
-    npm_install(ctx)
-    print("Done!")
-    print("* Updating pip")
-    ctx.run(
-        "docker-compose run --rm backend ./dockerpythonvenv/bin/pip install -U pip==20.0.2",
-        **PLATFORM_ARG,
-    )
-    print("* Installing pip-tools")
-    ctx.run(
-        "docker-compose run --rm backend ./dockerpythonvenv/bin/pip install pip-tools",
-        **PLATFORM_ARG,
-    )
-    print("* Sync Python dependencies")
-    pip_sync(ctx)
     initialize_database(ctx)
     print("\n* Start your dev server with:\n    docker-compose up")
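
A possible follow-up for the frontend watching that still needs sorting out: reintroduce a
watcher service in docker-compose.yml that builds the same `dev` target and reuses the
chokidar settings and `npm run watch` command from the removed `watch-static-files`
service. The sketch below is untested and its mount list is an assumption; it presumes
`npm run watch` writes its output under network-api/networkapi/frontend (as `npm run build`
does in the frontend stage), so the backend picks the compiled assets up through its
./network-api mount.

    # Sketch only - not part of this patch.
    services:
      watch-static-files:
        platform: linux/amd64 # Fix issues for M1 Macs
        build:
          target: dev  # reuse the dev image, which has node and node_modules baked in
          context: .
        environment:
          # Need to specify the SHELL env var for chokidar
          - SHELL=/bin/sh
          # Force polling because inotify doesn't work on Docker Windows
          - CHOKIDAR_USEPOLLING=1
          - CHOKIDAR_INTERVAL=2000
        command: npm run watch
        volumes:
          # Mount the frontend sources and config so the watcher sees host edits, and
          # network-api so the compiled output lands where the backend expects it.
          - ./source:/app/source:rw
          - ./network-api:/app/network-api:rw
          - ./esbuild.config.js:/app/esbuild.config.js:rw
          - ./postcss.config.js:/app/postcss.config.js:rw
          - ./tailwind.config.js:/app/tailwind.config.js:rw
          - ./tailwind-plugins:/app/tailwind-plugins:rw
        depends_on:
          - backend

If this pans out, `docker-compose up` would start the runserver, postgres and the watcher
together, matching what the removed Dockerfile.node-based service used to provide.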