Bug 1626058: Build docker images with kaniko, instead of dind; r=glandium,hwine

Differential Revision: https://phabricator.services.mozilla.com/D77864
Tom Prince 2020-07-07 19:53:32 +00:00
Parent 033c399c15
Commit a22772e2a2
42 changed files with 1613 additions and 527 deletions

View file

@@ -217,3 +217,6 @@ gfx/wr/target/
# Ignore these files in certviewer
toolkit/components/certviewer/content/node_modules/
toolkit/components/certviewer/content/package-lock.json
# Ignore Rust/Cargo output from running `cargo` directly for image_builder docker image
^taskcluster/docker/image_builder/build-image/target

View file

@@ -257,6 +257,14 @@ tasks:
type: 'directory'
path: '/builds/worker/artifacts'
expires: {$eval: expires}
'public/docker-contexts':
type: 'directory'
path: '/builds/worker/checkouts/gecko/docker-contexts'
# This needs to be at least the deadline of the
# decision task + the docker-image task deadlines.
# It is set to a week to allow some time for debugging,
# but the contexts are not useful long-term.
expires: {$fromNow: '7 day'}
extra:
$merge:

View file

@@ -18,8 +18,6 @@ transforms:
# generate tasks for every docker image in the directory, secure in the
# knowledge that unnecessary images will be omitted from the target task graph
jobs:
image_builder:
symbol: I(ib)
debian10-test:
symbol: I(deb10-test)
ubuntu1804-test:

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Nick Alexander <nalexander@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Tarek Ziade <tarek@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Joel Maher <jmaher@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>
@@ -27,8 +26,8 @@ ENV HOME=/builds/worker \
# Set a default command useful for debugging
CMD ["/bin/bash", "--login"]
# %ARG TASKCLUSTER_ROOT_URL
# %ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES && \
apt-get update && \
apt-get dist-upgrade && \

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>
@@ -8,8 +7,8 @@ VOLUME /builds/worker/tooltool-cache
ENV XZ_OPT=-T0
# %ARG TASKCLUSTER_ROOT_URL
# %ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES
# %ARG ARCH

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Wes Kocher <wkocher@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>
@@ -6,8 +5,8 @@ VOLUME /builds/worker/checkouts
VOLUME /builds/worker/workspace
VOLUME /builds/worker/tooltool-cache
# %ARG TASKCLUSTER_ROOT_URL
# %ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES && \
apt-get update && \
apt-get install cmake

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
### Add worker user and setup its workspace.
@@ -11,8 +10,8 @@ RUN mkdir /builds && \
# Declare default working folder
WORKDIR /builds/worker
# %ARG TASKCLUSTER_ROOT_URL
# %ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES && \
apt-get update && \
apt-get install \

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>
@@ -8,8 +7,8 @@ VOLUME /builds/worker/tooltool-cache
ENV XZ_OPT=-T0
# %ARG TASKCLUSTER_ROOT_URL
# %ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES
RUN apt-get update && \

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Kartikaya Gupta <kgupta@mozilla.com>

View file

@@ -1,47 +1,102 @@
FROM ubuntu:18.04
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# %include python/mozbuild/mozbuild/action/tooltool.py
ADD topsrcdir/python/mozbuild/mozbuild/action/tooltool.py /setup/tooltool.py
FROM golang:1.14 as skopeo
# %include taskcluster/docker/recipes/common.sh
ADD topsrcdir/taskcluster/docker/recipes/common.sh /setup/common.sh
WORKDIR /go/src/
RUN ["git", "clone", "--no-checkout", "--depth=1", "--branch=v1.1.0", "https://github.com/containers/skopeo", "."]
RUN ["git", "checkout", "63085f5bef1131aa9ec0907a5c8d66b67de7c4b2"]
ENV GO111MODULE=on CGO_ENABLED=0
RUN ["go", "build", \
"-mod=vendor", "-o", "out/skopeo", \
"-tags", "exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp", \
# Set unixTempDirForBigFiles so skopeo will extract in a directory hidden by kaniko
# We create the directory below.
"-ldflags", " -X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=/workspace/tmp -X github.com/containers/image/v5/signature.systemDefaultPolicyPath=/kaniko/containers/policy.json -extldflags \"-static\" -w -s", \
"./cmd/skopeo"]
# %include taskcluster/docker/recipes/install-mercurial.sh
ADD topsrcdir/taskcluster/docker/recipes/install-mercurial.sh /setup/install-mercurial.sh
FROM golang:1.14 as kaniko
WORKDIR /go/src/
RUN ["git", "clone", "--no-checkout", "--depth=1", "--branch=v0.24.0", "https://github.com/GoogleContainerTools/kaniko", "."]
RUN ["git", "checkout", "cdbd8af0578c56e2801b57461e9f417f9479d303"]
RUN ["make"]
# %include testing/mozharness/external_tools/robustcheckout.py
ADD topsrcdir/testing/mozharness/external_tools/robustcheckout.py /usr/local/mercurial/robustcheckout.py
# Build the `build-image` command as a static binary using musl
# The setup is loosely based on a stripped down version of
# https://github.com/emk/rust-musl-builder/blob/master/Dockerfile
FROM debian:buster as build-image
# %include taskcluster/docker/recipes/hgrc
COPY topsrcdir/taskcluster/docker/recipes/hgrc /etc/mercurial/hgrc.d/mozilla.rc
COPY apt.conf /etc/apt/apt.conf.d/99taskcluster
# %include taskcluster/scripts/run-task
ADD topsrcdir/taskcluster/scripts/run-task /usr/local/bin/run-task
RUN apt-get update && \
apt-get install \
build-essential \
ca-certificates \
curl \
musl-dev \
musl-tools \
&& \
useradd rust --user-group --create-home --shell /bin/bash
# Add and run setup script
ADD build-image.sh /usr/local/bin/build-image.sh
ADD download-and-compress /usr/local/bin/download-and-compress
ADD setup.sh /setup/setup.sh
ADD requirements/py2.txt /setup/requirements-py2.txt
ADD requirements/py3.txt /setup/requirements-py3.txt
RUN bash /setup/setup.sh
# Run all further code as user `rust`, and create our working directories
# as the appropriate user.
USER rust
# Setup a workspace that won't use AUFS.
VOLUME /builds/worker/checkouts
VOLUME /builds/worker/workspace
# Set up our path with all our binary directories, including those for the
# musl-gcc toolchain and for our Rust toolchain.
ENV PATH=/home/rust/.cargo/bin:$PATH
# Set variables normally configured at login by the shell's parent process;
# these are taken from the GNU su manual.
ENV HOME /builds/worker
ENV SHELL /bin/bash
ENV USER worker
ENV LOGNAME worker
ENV HOSTNAME taskcluster-worker
ENV LC_ALL C
# The Rust toolchain to use when building our image. Set by `hooks/build`.
ENV TOOLCHAIN=1.42.0 \
TARGET=x86_64-unknown-linux-musl
# Create worker user
RUN useradd -d /builds/worker -s /bin/bash -m worker
# Install our Rust toolchain and the `musl` target. We patch the
# command-line we pass to the installer so that it won't attempt to
# interact with the user or fool around with TTYs. We also set the default
# `--target` to musl so that our users don't need to keep overriding it
# manually.
RUN curl https://sh.rustup.rs -sSf | \
sh -s -- -y \
--profile minimal \
--default-toolchain $TOOLCHAIN \
--target $TARGET
# Set some sane defaults
WORKDIR /builds/worker/
CMD build-image.sh
# Expect our source code to live in /home/rust/src. We'll run the build as
# user `rust`, which will be uid 1000, gid 1000 outside the container.
RUN mkdir -p /home/rust/src
WORKDIR /home/rust/src
# Add our source code.
ADD --chown=rust:rust build-image/ ./
# --out-dir is not yet stable
ENV RUSTC_BOOTSTRAP=1
# Build our application.
RUN ["cargo", "build", "--target", "x86_64-unknown-linux-musl", "--out-dir=bin", "--release", "-Zunstable-options"]
FROM scratch as empty
FROM scratch
COPY --from=skopeo /go/src/out/skopeo /kaniko/skopeo
COPY --from=kaniko /go/src/out/executor /kaniko/executor
COPY --from=build-image \
/home/rust/src/bin/build-image \
/kaniko/build-image
ADD https://mkcert.org/generate/ /kaniko/ssl/certs/ca-certificates.crt
ENV SSL_CERT_DIR=/kaniko/ssl/certs
ADD policy.json /kaniko/containers/policy.json
ENV HOME /root
ENV USER root
WORKDIR /workspace
ENV PATH /usr/local/bin:/kaniko
VOLUME /workspace
# Create an empty temporary directory for skopeo
COPY --from=empty / /workspace/tmp
COPY --from=empty / /workspace/cache
ENTRYPOINT ["/kaniko/build-image"]
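
For orientation (not part of this commit): the image's entrypoint is configured entirely through environment variables, which the docker-image task definition supplies. Below is a minimal Rust sketch of invoking it under that contract; the values are hypothetical, and the variable names are taken from config.rs and taskcluster.rs later in this diff.

use std::process::Command;

fn main() -> std::io::Result<()> {
    // Hypothetical values; the names match what config.rs (via envy)
    // and taskcluster.rs read from the environment.
    let status = Command::new("/kaniko/build-image")
        .env("TASKCLUSTER_ROOT_URL", "https://tc.example.com")
        .env("CONTEXT_TASK_ID", "xGRRgzG6QlCCwsFsyuqm0Q")
        .env("CONTEXT_PATH", "public/docker-contexts/image.tar.gz")
        .env("IMAGE_NAME", "debian10-base")
        .env("DOCKER_IMAGE_ZSTD_LEVEL", "3")
        .status()?;
    assert!(status.success(), "build-image failed");
    Ok(())
}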

View file

@@ -1 +1 @@
3.0.0
4.0.0

View file

@@ -0,0 +1,5 @@
quiet "true";
APT::Get::Assume-Yes "true";
APT::Install-Recommends "false";
Acquire::Check-Valid-Until "false";
Acquire::Retries "5";

View file

@@ -1,66 +0,0 @@
#!/bin/bash -vex
# Set bash options to exit immediately if a pipeline exits non-zero, print a
# trace of commands, and make output verbose (print shell input as it's
# read)
# See https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html
set -x -e -v -o pipefail
# Prefix errors with taskcluster error prefix so that they are parsed by Treeherder
raise_error() {
echo
echo "[taskcluster-image-build:error] $1"
exit 1
}
# Ensure that the PROJECT is specified so the image can be indexed
test -n "$PROJECT" || raise_error "PROJECT must be provided."
test -n "$HASH" || raise_error "Context HASH must be provided."
test -n "$IMAGE_NAME" || raise_error "IMAGE_NAME must be provided."
# The docker socket is mounted by the taskcluster worker in a way that prevents
# us changing its permissions to allow the worker user to access it. Create a
# proxy socket that the worker user can use.
export DOCKER_SOCKET=/var/run/docker.proxy
socat UNIX-LISTEN:$DOCKER_SOCKET,fork,group=worker,mode=0775 UNIX-CLIENT:/var/run/docker.sock </dev/null &
# Disable check until new version is tested.
# shellcheck disable=SC2064
trap "kill $!" EXIT
LOAD_COMMAND=
if [ -n "$DOCKER_IMAGE_PARENT" ]; then
test -n "$DOCKER_IMAGE_PARENT_TASK" || raise_error "DOCKER_IMAGE_PARENT_TASK must be provided."
LOAD_COMMAND="\
/builds/worker/checkouts/gecko/mach taskcluster-load-image \
--task-id \"$DOCKER_IMAGE_PARENT_TASK\" \
-t \"$DOCKER_IMAGE_PARENT\" && "
fi
# Build image
run-task \
--gecko-checkout "/builds/worker/checkouts/gecko" \
--gecko-sparse-profile build/sparse-profiles/docker-image \
-- \
sh -x -c "$LOAD_COMMAND \
/builds/worker/checkouts/gecko/mach taskcluster-build-image \
-t \"${IMAGE_NAME}:${HASH}-pre\" \
\"$IMAGE_NAME\""
# Squash the image
export DOCKER_HOST=unix:/$DOCKER_SOCKET
/usr/local/bin/docker-squash -v -t "${IMAGE_NAME}:${HASH}" "${IMAGE_NAME}:${HASH}-pre"
# Create artifact folder (note that this must occur after run-task)
mkdir -p /builds/worker/workspace/artifacts
# Get image from docker daemon (try up to 10 times)
# This interacts directly with the docker remote API, see:
# https://docs.docker.com/engine/reference/api/docker_remote_api_v1.18/
#
# The script will retry up to 10 times.
# Disable quoting error until fixing the / escaping
# shellcheck disable=SC2086
/usr/local/bin/download-and-compress \
http+unix://%2Fvar%2Frun%2Fdocker.sock/images/${IMAGE_NAME}:${HASH}/get \
/builds/worker/workspace/image.tar.zst.tmp \
/builds/worker/workspace/artifacts/image.tar.zst

1078 taskcluster/docker/image_builder/build-image/Cargo.lock generated Normal file

File diff suppressed because it is too large

View file

@@ -0,0 +1,22 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
[package]
name = "build-image"
version = "0.1.0"
authors = ["Tom Prince <tom.prince@twistedmatrix.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
reqwest = { version= "0.10.4", features = ["rustls-tls", "blocking"], default-features = false}
zstd = "0.5.1"
url = "2.1.1"
anyhow = "1.0.27"
serde = { version = "1.0.105", features = ["derive"]}
serde_json = "1.0.50"
envy = "0.4.1"
[workspace]

View file

@@ -0,0 +1,112 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
use anyhow::Result;
use serde::de::Error;
use serde::Deserialize;
use std::collections::HashMap;
fn default_image_name() -> String {
"mozilla.org/taskgraph/default-image:latest".into()
}
fn default_zstd_level() -> i32 {
3
}
fn from_json<'de, D, T>(deserializer: D) -> Result<T, D::Error>
where
D: serde::de::Deserializer<'de>,
T: serde::de::DeserializeOwned,
{
let value: String = serde::Deserialize::deserialize(deserializer)?;
serde_json::from_str(&value).map_err(|err| {
D::Error::invalid_value(serde::de::Unexpected::Str(&value), &&*err.to_string())
})
}
#[derive(Deserialize, Debug, PartialEq, Eq)]
pub struct Config {
pub context_task_id: String,
pub context_path: String,
pub parent_task_id: Option<String>,
#[serde(default = "default_image_name")]
pub image_name: String,
#[serde(default = "default_zstd_level")]
pub docker_image_zstd_level: i32,
#[serde(default)]
pub debug: bool,
#[serde(default, deserialize_with = "from_json")]
pub docker_build_args: HashMap<String, String>,
}
impl Config {
pub fn from_env() -> Result<Config> {
Ok(envy::from_env()?)
}
}
#[cfg(test)]
mod test {
use anyhow::Result;
#[test]
fn test() -> Result<()> {
let env: Vec<(String, String)> = vec![
("CONTEXT_TASK_ID".into(), "xGRRgzG6QlCCwsFsyuqm0Q".into()),
(
"CONTEXT_PATH".into(),
"public/docker-contexts/image.tar.gz".into(),
),
];
let config: super::Config = envy::from_iter(env.into_iter())?;
assert_eq!(
config,
super::Config {
context_task_id: "xGRRgzG6QlCCwsFsyuqm0Q".into(),
context_path: "public/docker-contexts/image.tar.gz".into(),
parent_task_id: None,
image_name: "mozilla.org/taskgraph/default-image:latest".into(),
docker_image_zstd_level: 3,
debug: false,
docker_build_args: Default::default()
}
);
Ok(())
}
#[test]
fn test_docker_build_args() -> Result<()> {
let env: Vec<(String, String)> = vec![
("CONTEXT_TASK_ID".into(), "xGRRgzG6QlCCwsFsyuqm0Q".into()),
(
"CONTEXT_PATH".into(),
"public/docker-contexts/image.tar.gz".into(),
),
(
"DOCKER_BUILD_ARGS".into(),
serde_json::json! ({
"test": "Value",
})
.to_string(),
),
];
let config: super::Config = envy::from_iter(env.into_iter())?;
assert_eq!(
config,
super::Config {
context_task_id: "xGRRgzG6QlCCwsFsyuqm0Q".into(),
context_path: "public/docker-contexts/image.tar.gz".into(),
parent_task_id: None,
image_name: "mozilla.org/taskgraph/default-image:latest".into(),
docker_image_zstd_level: 3,
debug: false,
docker_build_args: [("test".to_string(), "Value".to_string())]
.iter()
.cloned()
.collect(),
}
);
Ok(())
}
}

View file

@@ -0,0 +1,169 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
#![forbid(unsafe_code)]
use std::collections::HashMap;
use std::process::Command;
use anyhow::{ensure, Context, Result};
use serde::Deserialize;
mod config;
mod taskcluster;
use config::Config;
fn log_step(msg: &str) {
println!("[build-image] {}", msg);
}
fn read_image_digest(path: &str) -> Result<String> {
let output = Command::new("/kaniko/skopeo")
.arg("inspect")
.arg(format!("docker-archive:{}", path))
.stdout(std::process::Stdio::piped())
.spawn()?
.wait_with_output()?;
ensure!(output.status.success(), "Could not inspect parent image.");
#[derive(Deserialize, Debug)]
#[serde(rename_all = "PascalCase")]
struct ImageInfo {
#[serde(skip_serializing_if = "Option::is_none")]
name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
tag: Option<String>,
digest: String,
// ...
}
let image_info: ImageInfo = serde_json::from_slice(&output.stdout)
.with_context(|| format!("Could not parse image info from {:?}", path))?;
Ok(image_info.digest)
}
fn download_parent_image(
cluster: &taskcluster::TaskCluster,
task_id: &str,
dest: &str,
) -> Result<String> {
zstd::stream::copy_decode(
cluster.stream_artifact(&task_id, "public/image.tar.zst")?,
std::fs::File::create(dest)?,
)
.context("Could not download parent image.")?;
read_image_digest(dest)
}
fn build_image(
context_path: &str,
dest: &str,
debug: bool,
build_args: HashMap<String, String>,
) -> Result<()> {
let mut command = Command::new("/kaniko/executor");
command
.stderr(std::process::Stdio::inherit())
.args(&["--context", &format!("tar://{}", context_path)])
.args(&["--destination", "image"])
.args(&["--dockerfile", "Dockerfile"])
.arg("--no-push")
.args(&["--cache-dir", "/workspace/cache"])
.arg("--single-snapshot")
// FIXME: Generating reproducible layers currently causes OOM.
// .arg("--reproducible")
.arg("--whitelist-var-run=false")
.args(&["--tarPath", dest]);
if debug {
command.args(&["-v", "debug"]);
}
for (key, value) in build_args {
command.args(&["--build-arg", &format!("{}={}", key, value)]);
}
let status = command.status()?;
ensure!(status.success(), "Could not build image.");
Ok(())
}
fn repack_image(source: &str, dest: &str, image_name: &str) -> Result<()> {
let status = Command::new("/kaniko/skopeo")
.arg("copy")
.arg(format!("docker-archive:{}", source))
.arg(format!("docker-archive:{}:{}", dest, image_name))
.stderr(std::process::Stdio::inherit())
.status()?;
ensure!(status.success(), "Could not repack image.");
Ok(())
}
fn main() -> Result<()> {
let config = Config::from_env().context("Could not parse environment variables.")?;
let cluster = taskcluster::TaskCluster::from_env()?;
let mut build_args = config.docker_build_args;
build_args.insert("TASKCLUSTER_ROOT_URL".into(), cluster.root_url());
log_step("Downloading context.");
std::io::copy(
&mut cluster.stream_artifact(&config.context_task_id, &config.context_path)?,
&mut std::fs::File::create("/workspace/context.tar.gz")?,
)
.context("Could not download image context.")?;
if let Some(parent_task_id) = config.parent_task_id {
log_step("Downloading image.");
let digest = download_parent_image(&cluster, &parent_task_id, "/workspace/parent.tar")?;
log_step(&format!("Parent image digest {}", &digest));
std::fs::rename(
"/workspace/parent.tar",
format!("/workspace/cache/{}", digest),
)?;
build_args.insert(
"DOCKER_IMAGE_PARENT".into(),
format!("parent:latest@{}", digest),
);
}
log_step("Building image.");
build_image(
"/workspace/context.tar.gz",
"/workspace/image-pre.tar",
config.debug,
build_args,
)?;
log_step("Repacking image.");
repack_image(
"/workspace/image-pre.tar",
"/workspace/image.tar",
&config.image_name,
)?;
log_step("Compressing image.");
compress_file(
"/workspace/image.tar",
"/workspace/image.tar.zst",
config.docker_image_zstd_level,
)?;
Ok(())
}
fn compress_file(
source: impl AsRef<std::path::Path>,
dest: impl AsRef<std::path::Path>,
zstd_level: i32,
) -> Result<()> {
Ok(zstd::stream::copy_encode(
std::fs::File::open(source)?,
std::fs::File::create(dest)?,
zstd_level,
)?)
}
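
A note on read_image_digest above: `skopeo inspect` emits PascalCase JSON, which the function-local ImageInfo struct deserializes, keeping only the digest. A standalone sketch of that parsing with a hypothetical payload (assumes the serde and serde_json crates):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "PascalCase")]
struct ImageInfo {
    digest: String, // maps to "Digest" via rename_all
}

fn main() {
    // Hypothetical skopeo output; unknown fields such as Name and Tag
    // are ignored by serde's default behavior.
    let raw = r#"{"Name":"parent","Tag":"latest","Digest":"sha256:0000aaaa"}"#;
    let info: ImageInfo = serde_json::from_str(raw).unwrap();
    assert_eq!(info.digest, "sha256:0000aaaa");
}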

View file

@@ -0,0 +1,55 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
use anyhow::{Context, Result};
pub struct TaskCluster {
root_url: url::Url,
client: reqwest::blocking::Client,
}
impl TaskCluster {
pub fn from_env() -> Result<Self> {
std::env::var("TASKCLUSTER_ROOT_URL")
.context("TASKCLUSTER_ROOT_URL not set.")
.and_then(|var| var.parse().context("Couldn't parse TASKCLUSTER_ROOT_URL."))
.map(|root_url| TaskCluster {
root_url,
client: reqwest::blocking::Client::new(),
})
}
/// Return the root URL as suitable for passing to other processes.
///
/// In particular, any trailing slashes are removed.
pub fn root_url(&self) -> String {
self.root_url.as_str().trim_end_matches("/").to_string()
}
pub fn task_artifact_url(&self, task_id: &str, path: &str) -> url::Url {
let mut url = self.root_url.clone();
url.set_path(&format!("api/queue/v1/task/{}/artifacts/{}", task_id, path));
url
}
pub fn stream_artifact(&self, task_id: &str, path: &str) -> Result<impl std::io::Read> {
let url = self.task_artifact_url(task_id, path);
Ok(self.client.get(url).send()?.error_for_status()?)
}
}
#[cfg(test)]
mod test {
#[test]
fn test_url() {
let cluster = super::TaskCluster {
root_url: url::Url::parse("http://taskcluster.example").unwrap(),
client: reqwest::blocking::Client::new(),
};
assert_eq!(
cluster.task_artifact_url("QzDLgP4YRwanIvgPt6ClfA","public/docker-contexts/decision.tar.gz"),
url::Url::parse("http://taskcluster.example/api/queue/v1/task/QzDLgP4YRwanIvgPt6ClfA/artifacts/public/docker-contexts/decision.tar.gz").unwrap(),
);
}
}
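
The trailing-slash trimming in root_url() above matters because the url crate normalizes a URL with an empty path to end in "/". A small sketch of the behavior being compensated for (assumes the url crate; the host is hypothetical):

fn main() {
    let root: url::Url = "https://tc.example.com".parse().unwrap();
    // url::Url appends "/" to a bare authority...
    assert_eq!(root.as_str(), "https://tc.example.com/");
    // ...so root_url() trims it before handing the value to other processes.
    assert_eq!(root.as_str().trim_end_matches('/'), "https://tc.example.com");
}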

View file

@@ -1,85 +0,0 @@
#!/usr/bin/python3 -u
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import time
import requests
import requests_unixsocket
import zstandard as zstd
# Allow requests to fetch from UNIX domain sockets.
requests_unixsocket.monkeypatch()
def download_and_compress(url, path, level):
r = requests.get(url, stream=True)
if r.status_code != 200:
raise Exception('non-200 response: %d' % r.status_code)
in_size = 0
out_size = 0
last_progress = time.time()
# Use all available CPU cores for multi-threaded compression.
cctx = zstd.ZstdCompressor(threads=-1, level=level, write_checksum=True)
cobj = cctx.compressobj()
with open(path, 'wb') as fh:
for raw in r.iter_content(zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE):
# Print output periodically, for humans.
now = time.time()
if now - last_progress > 5.0:
print('%d -> %d' % (in_size, out_size))
last_progress = now
in_size += len(raw)
chunk = cobj.compress(raw)
if not chunk:
continue
out_size += len(chunk)
fh.write(chunk)
chunk = cobj.flush()
out_size += len(chunk)
fh.write(chunk)
return in_size, out_size
if __name__ == '__main__':
url, temp_path, final_path = sys.argv[1:]
# Default zstd level is 3. We default to 10 because multi-threaded
# compression allows us to burn lots of CPU for significant image
# size reductions without a major wall time penalty.
level = int(os.environ.get('DOCKER_IMAGE_ZSTD_LEVEL', '10'))
print('using zstandard compression level %d' % level)
count = 0
while count < 10:
count += 1
try:
t_start = time.time()
raw_size, compress_size = download_and_compress(url, temp_path,
level)
elapsed = time.time() - t_start
# Move to final path at end so partial image isn't uploaded as
# an artifact.
os.rename(temp_path, final_path)
speed = int(raw_size / elapsed) / 1000000
print('compression ratio: %.2f (%d -> %d) @ %d MB/s' % (
float(compress_size) / float(raw_size),
raw_size, compress_size, speed))
sys.exit(0)
except Exception as e:
print('exception: %s' % e)
time.sleep(5)
print('reached maximum retry attempts; giving up')
sys.exit(1)

View file

@@ -0,0 +1,11 @@
{
"default": [{"type": "reject"}],
"transports": {
"docker-archive": {
"": [{"type": "insecureAcceptAnything"}]
},
"dir": {
"": [{"type": "insecureAcceptAnything"}]
}
}
}

View file

@@ -1,2 +0,0 @@
# For compressing docker images
zstandard

View file

@@ -1,67 +0,0 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --generate-hashes --output-file requirements/py2.txt requirements/py2.in
#
cffi==1.12.1 \
--hash=sha256:0b5f895714a7a9905148fc51978c62e8a6cbcace30904d39dcd0d9e2265bb2f6 \
--hash=sha256:27cdc7ba35ee6aa443271d11583b50815c4bb52be89a909d0028e86c21961709 \
--hash=sha256:2d4a38049ea93d5ce3c7659210393524c1efc3efafa151bd85d196fa98fce50a \
--hash=sha256:3262573d0d60fc6b9d0e0e6e666db0e5045cbe8a531779aa0deb3b425ec5a282 \
--hash=sha256:358e96cfffc185ab8f6e7e425c7bb028931ed08d65402fbcf3f4e1bff6e66556 \
--hash=sha256:37c7db824b5687fbd7ea5519acfd054c905951acc53503547c86be3db0580134 \
--hash=sha256:39b9554dfe60f878e0c6ff8a460708db6e1b1c9cc6da2c74df2955adf83e355d \
--hash=sha256:42b96a77acf8b2d06821600fa87c208046decc13bd22a4a0e65c5c973443e0da \
--hash=sha256:5b37dde5035d3c219324cac0e69d96495970977f310b306fa2df5910e1f329a1 \
--hash=sha256:5d35819f5566d0dd254f273d60cf4a2dcdd3ae3003dfd412d40b3fe8ffd87509 \
--hash=sha256:5df73aa465e53549bd03c819c1bc69fb85529a5e1a693b7b6cb64408dd3970d1 \
--hash=sha256:7075b361f7a4d0d4165439992d0b8a3cdfad1f302bf246ed9308a2e33b046bd3 \
--hash=sha256:7678b5a667b0381c173abe530d7bdb0e6e3b98e062490618f04b80ca62686d96 \
--hash=sha256:7dfd996192ff8a535458c17f22ff5eb78b83504c34d10eefac0c77b1322609e2 \
--hash=sha256:8a3be5d31d02c60f84c4fd4c98c5e3a97b49f32e16861367f67c49425f955b28 \
--hash=sha256:9812e53369c469506b123aee9dcb56d50c82fad60c5df87feb5ff59af5b5f55c \
--hash=sha256:9b6f7ba4e78c52c1a291d0c0c0bd745d19adde1a9e1c03cb899f0c6efd6f8033 \
--hash=sha256:a85bc1d7c3bba89b3d8c892bc0458de504f8b3bcca18892e6ed15b5f7a52ad9d \
--hash=sha256:aa6b9c843ad645ebb12616de848cc4e25a40f633ccc293c3c9fe34107c02c2ea \
--hash=sha256:bae1aa56ee00746798beafe486daa7cfb586cd395c6ce822ba3068e48d761bc0 \
--hash=sha256:bae96e26510e4825d5910a196bf6b5a11a18b87d9278db6d08413be8ea799469 \
--hash=sha256:bd78df3b594013b227bf31d0301566dc50ba6f40df38a70ded731d5a8f2cb071 \
--hash=sha256:c2711197154f46d06f73542c539a0ff5411f1951fab391e0a4ac8359badef719 \
--hash=sha256:d998c20e3deed234fca993fd6c8314cb7cbfda05fd170f1bd75bb5d7421c3c5a \
--hash=sha256:df4f840d77d9e37136f8e6b432fecc9d6b8730f18f896e90628712c793466ce6 \
--hash=sha256:f5653c2581acb038319e6705d4e3593677676df14b112f13e0b5b44b6a18df1a \
--hash=sha256:f7c7aa485a2e2250d455148470ffd0195eecc3d845122635202d7467d6f7b4cf \
--hash=sha256:f9e2c66a6493147de835f207f198540a56b26745ce4f272fbc7c2f2cfebeb729 \
# via zstandard
pycparser==2.19 \
--hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
# via cffi
zstandard==0.11.1 \
--hash=sha256:19f5ad81590acd20dbdfb930b87a035189778662fdc67ab8cbcc106269ed1be8 \
--hash=sha256:1a1db0c9774181e806a418c32d511aa085c7e2c28c257a58f6c107f5decb3109 \
--hash=sha256:22d7aa898f36f78108cc1ef0c8da8225f0add518441d815ad4fdd1d577378209 \
--hash=sha256:357873afdd7cd0e653d169c36ce837ce2b3e5926dd4a5c0f0476c813f6765373 \
--hash=sha256:3c31da5d78a7b07e722e8a3e0b1295bc9b316b7e90a1666659c451a42750ffe4 \
--hash=sha256:3f76562ec63fabc6f4b5be0cd986f911c97105c35c31b4d655b90c4d2fe07f40 \
--hash=sha256:42fa4462e0563fe17e73dfeb95eef9b00429b86282f8f6ca0e2765b1855a8324 \
--hash=sha256:51aad01a5709ca6f45768c69ffd4c887528e5ad9e09302426b735560752c4e82 \
--hash=sha256:6cd81819a02e57e38e27c53c5c0a7015e059b0e148a18bf27b46b4f808840879 \
--hash=sha256:717fd2494f222164396e03d08ef57174d2a889920b81ca49f276caf9381e6405 \
--hash=sha256:71c8711458212c973a9b719275db8111f22803e0caf675affde50703b96e9be1 \
--hash=sha256:76a331b5a6258fce3906551557db9be83bdd89a62f66f509a55a4a307239c782 \
--hash=sha256:7c92dfcdf7e0c540f9718b40b4c54516a968ef6b81567b75df81866a1af2189d \
--hash=sha256:7f3db21223a8bb4ffcf6c36b9c20d38278967723b47fce249dcb6ec6d4082b83 \
--hash=sha256:7fa9deba4c904e76870e08324adff94ec3a4bc56a50bbe1a9f859a4aed11c0d2 \
--hash=sha256:88912cbcf68cc40037c113460a166ebfbbb24864ceebb89ad221ea346f22e995 \
--hash=sha256:94aa5bb817f1c747b21214f6ef83a022bcb63bf81e4dae2954768165c13a510b \
--hash=sha256:951e382a2ea47179ecb3e314e8c70f2e5189e3652ccbbcb71c6443dd71bc20fc \
--hash=sha256:978a500ae1184f602dc902977ec208c7cf02c10caae9c159b10976a7cb29f879 \
--hash=sha256:991c4a40171d87854b219cdf2ba56c1c34b3b3a8ebe5d1ab63bd357ff71271b2 \
--hash=sha256:9ca84187182743d2e6bbf9d3f79d3834db205cddc98add27ad20f2189d080a60 \
--hash=sha256:ae50bc839cf1ff549f55a3e55922563f246fb692f77497175a8d8d4cddc294da \
--hash=sha256:b7abae5b17e82d5f78aaa641077b4619c6ad204e30c6f3445d422acff5f35d3e \
--hash=sha256:b8fce0c961654f77c81a6ae1f2cd40633b41ef16a12ae02f0382ed6692f9bb90 \
--hash=sha256:d8f047d3647a5cd1b77b4580f35208c938da00c101a092571c85bcefaa2d725d \
--hash=sha256:f1785b31bf428e964a9670dd4f721023f2741ef7fd67c663bf01e3d4d3f9ec2a \
--hash=sha256:fcf70e1e9d38035a15482e954ba064f3b701cf84cfe571576d15af93ac2a2fb1

View file

@@ -1,8 +0,0 @@
# The docker module removed support for docker engines < 1.21 starting with
# docker 3.0. Once we upgrade the docker running on our workers, we can remove
# this restriction here.
docker<3.0
docker-squash
# For compressing docker images
zstandard

View file

@@ -1,104 +0,0 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# pip-compile --generate-hashes --output-file requirements/py3.txt requirements/py3.in
#
certifi==2018.11.29 \
--hash=sha256:47f9c83ef4c0c621eaef743f133f09fa8a74a9b75f037e8624f83bd1b6626cb7 \
--hash=sha256:993f830721089fef441cdfeb4b2c8c9df86f0c63239f06bd025a76a7daddb033 \
# via requests
cffi==1.12.1 \
--hash=sha256:0b5f895714a7a9905148fc51978c62e8a6cbcace30904d39dcd0d9e2265bb2f6 \
--hash=sha256:27cdc7ba35ee6aa443271d11583b50815c4bb52be89a909d0028e86c21961709 \
--hash=sha256:2d4a38049ea93d5ce3c7659210393524c1efc3efafa151bd85d196fa98fce50a \
--hash=sha256:3262573d0d60fc6b9d0e0e6e666db0e5045cbe8a531779aa0deb3b425ec5a282 \
--hash=sha256:358e96cfffc185ab8f6e7e425c7bb028931ed08d65402fbcf3f4e1bff6e66556 \
--hash=sha256:37c7db824b5687fbd7ea5519acfd054c905951acc53503547c86be3db0580134 \
--hash=sha256:39b9554dfe60f878e0c6ff8a460708db6e1b1c9cc6da2c74df2955adf83e355d \
--hash=sha256:42b96a77acf8b2d06821600fa87c208046decc13bd22a4a0e65c5c973443e0da \
--hash=sha256:5b37dde5035d3c219324cac0e69d96495970977f310b306fa2df5910e1f329a1 \
--hash=sha256:5d35819f5566d0dd254f273d60cf4a2dcdd3ae3003dfd412d40b3fe8ffd87509 \
--hash=sha256:5df73aa465e53549bd03c819c1bc69fb85529a5e1a693b7b6cb64408dd3970d1 \
--hash=sha256:7075b361f7a4d0d4165439992d0b8a3cdfad1f302bf246ed9308a2e33b046bd3 \
--hash=sha256:7678b5a667b0381c173abe530d7bdb0e6e3b98e062490618f04b80ca62686d96 \
--hash=sha256:7dfd996192ff8a535458c17f22ff5eb78b83504c34d10eefac0c77b1322609e2 \
--hash=sha256:8a3be5d31d02c60f84c4fd4c98c5e3a97b49f32e16861367f67c49425f955b28 \
--hash=sha256:9812e53369c469506b123aee9dcb56d50c82fad60c5df87feb5ff59af5b5f55c \
--hash=sha256:9b6f7ba4e78c52c1a291d0c0c0bd745d19adde1a9e1c03cb899f0c6efd6f8033 \
--hash=sha256:a85bc1d7c3bba89b3d8c892bc0458de504f8b3bcca18892e6ed15b5f7a52ad9d \
--hash=sha256:aa6b9c843ad645ebb12616de848cc4e25a40f633ccc293c3c9fe34107c02c2ea \
--hash=sha256:bae1aa56ee00746798beafe486daa7cfb586cd395c6ce822ba3068e48d761bc0 \
--hash=sha256:bae96e26510e4825d5910a196bf6b5a11a18b87d9278db6d08413be8ea799469 \
--hash=sha256:bd78df3b594013b227bf31d0301566dc50ba6f40df38a70ded731d5a8f2cb071 \
--hash=sha256:c2711197154f46d06f73542c539a0ff5411f1951fab391e0a4ac8359badef719 \
--hash=sha256:d998c20e3deed234fca993fd6c8314cb7cbfda05fd170f1bd75bb5d7421c3c5a \
--hash=sha256:df4f840d77d9e37136f8e6b432fecc9d6b8730f18f896e90628712c793466ce6 \
--hash=sha256:f5653c2581acb038319e6705d4e3593677676df14b112f13e0b5b44b6a18df1a \
--hash=sha256:f7c7aa485a2e2250d455148470ffd0195eecc3d845122635202d7467d6f7b4cf \
--hash=sha256:f9e2c66a6493147de835f207f198540a56b26745ce4f272fbc7c2f2cfebeb729 \
# via zstandard
chardet==3.0.4 \
--hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
# via requests
docker-pycreds==0.4.0 \
--hash=sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4 \
--hash=sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49 \
# via docker
docker-squash==1.0.7 \
--hash=sha256:95ca24fbeffa915c3d467b7ad538c6437a02f68aaa4e31e16c451c47a30a2169
docker==2.7.0 \
--hash=sha256:144248308e8ea31c4863c6d74e1b55daf97cc190b61d0fe7b7313ab920d6a76c \
--hash=sha256:c1d4e37b1ea03b2b6efdd0379640f6ea372fefe56efa65d4d17c34c6b9d54558
idna==2.8 \
--hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
--hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
# via requests
pycparser==2.19 \
--hash=sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3 \
# via cffi
requests==2.21.0 \
--hash=sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e \
--hash=sha256:7bf2a778576d825600030a110f3c0e3e8edc51dfaafe1c146e39a2027784957b \
# via docker
six==1.12.0 \
--hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
--hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
# via docker, docker-pycreds, docker-squash, websocket-client
urllib3==1.24.1 \
--hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
--hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
# via requests
websocket-client==0.54.0 \
--hash=sha256:8c8bf2d4f800c3ed952df206b18c28f7070d9e3dcbd6ca6291127574f57ee786 \
--hash=sha256:e51562c91ddb8148e791f0155fdb01325d99bb52c4cdbb291aee7a3563fd0849 \
# via docker
zstandard==0.11.1 \
--hash=sha256:19f5ad81590acd20dbdfb930b87a035189778662fdc67ab8cbcc106269ed1be8 \
--hash=sha256:1a1db0c9774181e806a418c32d511aa085c7e2c28c257a58f6c107f5decb3109 \
--hash=sha256:22d7aa898f36f78108cc1ef0c8da8225f0add518441d815ad4fdd1d577378209 \
--hash=sha256:357873afdd7cd0e653d169c36ce837ce2b3e5926dd4a5c0f0476c813f6765373 \
--hash=sha256:3c31da5d78a7b07e722e8a3e0b1295bc9b316b7e90a1666659c451a42750ffe4 \
--hash=sha256:3f76562ec63fabc6f4b5be0cd986f911c97105c35c31b4d655b90c4d2fe07f40 \
--hash=sha256:42fa4462e0563fe17e73dfeb95eef9b00429b86282f8f6ca0e2765b1855a8324 \
--hash=sha256:51aad01a5709ca6f45768c69ffd4c887528e5ad9e09302426b735560752c4e82 \
--hash=sha256:6cd81819a02e57e38e27c53c5c0a7015e059b0e148a18bf27b46b4f808840879 \
--hash=sha256:717fd2494f222164396e03d08ef57174d2a889920b81ca49f276caf9381e6405 \
--hash=sha256:71c8711458212c973a9b719275db8111f22803e0caf675affde50703b96e9be1 \
--hash=sha256:76a331b5a6258fce3906551557db9be83bdd89a62f66f509a55a4a307239c782 \
--hash=sha256:7c92dfcdf7e0c540f9718b40b4c54516a968ef6b81567b75df81866a1af2189d \
--hash=sha256:7f3db21223a8bb4ffcf6c36b9c20d38278967723b47fce249dcb6ec6d4082b83 \
--hash=sha256:7fa9deba4c904e76870e08324adff94ec3a4bc56a50bbe1a9f859a4aed11c0d2 \
--hash=sha256:88912cbcf68cc40037c113460a166ebfbbb24864ceebb89ad221ea346f22e995 \
--hash=sha256:94aa5bb817f1c747b21214f6ef83a022bcb63bf81e4dae2954768165c13a510b \
--hash=sha256:951e382a2ea47179ecb3e314e8c70f2e5189e3652ccbbcb71c6443dd71bc20fc \
--hash=sha256:978a500ae1184f602dc902977ec208c7cf02c10caae9c159b10976a7cb29f879 \
--hash=sha256:991c4a40171d87854b219cdf2ba56c1c34b3b3a8ebe5d1ab63bd357ff71271b2 \
--hash=sha256:9ca84187182743d2e6bbf9d3f79d3834db205cddc98add27ad20f2189d080a60 \
--hash=sha256:ae50bc839cf1ff549f55a3e55922563f246fb692f77497175a8d8d4cddc294da \
--hash=sha256:b7abae5b17e82d5f78aaa641077b4619c6ad204e30c6f3445d422acff5f35d3e \
--hash=sha256:b8fce0c961654f77c81a6ae1f2cd40633b41ef16a12ae02f0382ed6692f9bb90 \
--hash=sha256:d8f047d3647a5cd1b77b4580f35208c938da00c101a092571c85bcefaa2d725d \
--hash=sha256:f1785b31bf428e964a9670dd4f721023f2741ef7fd67c663bf01e3d4d3f9ec2a \
--hash=sha256:fcf70e1e9d38035a15482e954ba064f3b701cf84cfe571576d15af93ac2a2fb1

View file

@@ -1,54 +0,0 @@
#!/bin/bash -vex
set -v -e -x
export DEBIAN_FRONTEND=noninteractive
# Update apt-get lists
apt-get update -y
# Install dependencies
apt-get install -y --no-install-recommends \
socat \
python-requests \
python-requests-unixsocket \
python3.5 \
python3-minimal \
python3-requests \
python3-requests-unixsocket
# Extra dependencies only needed for image building. Will be removed at
# end of script.
apt-get install -y python-pip python3-pip
# Install mercurial
# shellcheck disable=SC1091
. /setup/common.sh
# shellcheck disable=SC1091
. /setup/install-mercurial.sh
# Install build-image.sh script
chmod +x /usr/local/bin/build-image.sh
chmod +x /usr/local/bin/run-task
chmod +x /usr/local/bin/download-and-compress
# Create workspace
mkdir -p /builds/worker/workspace
# We need to install for both Python 2 and 3 because `mach taskcluster-load-image`
# uses Python 2 and `download-and-compress` uses Python 3.
# We also need to make sure to explicitly install python3-distutils so that it doesn't get purged later
apt-get install -y python3-distutils
/usr/bin/pip -v install -r /setup/requirements-py2.txt
/usr/bin/pip3 -v install -r /setup/requirements-py3.txt
# python-pip only needed to install python-zstandard. Removing it removes
# several hundred MB of dependencies from the image.
apt-get purge -y python-pip python3-pip
# Purge apt-get caches to minimize image size
apt-get auto-remove -y
apt-get clean -y
rm -rf /var/lib/apt/lists/
# Remove this script
rm -rf /setup/

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Ben Hearsum <bhearsum@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
LABEL maintainer="Andi-Bogdan Postelnicu <andi@mozilla.com>"
@@ -12,8 +11,8 @@ ENV XZ_OPT=-T0
COPY topsrcdir/taskcluster/docker/recipes/prepare_openjdk.sh /tmp/prepare_openjdk.sh
RUN /tmp/prepare_openjdk.sh && rm /tmp/prepare_openjdk.sh
# %ARG TASKCLUSTER_ROOT_URL
# %ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES
RUN apt-get update && \

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Rob Lemley <rob@thunderbird.net>
# Used by Thunderbird to build third party libraries for OTR messaging.

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>
@@ -8,8 +7,8 @@ VOLUME /builds/worker/tooltool-cache
ENV XZ_OPT=-T0
# %ARG DOCKER_IMAGE_PACKAGES
# %ARG TASKCLUSTER_ROOT_URL
ARG DOCKER_IMAGE_PACKAGES
ARG TASKCLUSTER_ROOT_URL
RUN /usr/local/sbin/setup_packages.sh $TASKCLUSTER_ROOT_URL $DOCKER_IMAGE_PACKAGES
RUN apt-get update && \

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Mike Hommey <mhommey@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Barret Rennie <barret@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Kartikaya Gupta <kgupta@mozilla.com>

View file

@@ -1,4 +1,3 @@
# %ARG DOCKER_IMAGE_PARENT
FROM $DOCKER_IMAGE_PARENT
MAINTAINER Dzmitry Malyshau <dmalyshau@mozilla.com>

View file

@@ -4,19 +4,21 @@
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import re
import json
from collections import deque
import six
from six import text_type
import taskgraph
from taskgraph.transforms.base import TransformSequence
from taskgraph.transforms.task import _run_task_suffix
from .. import GECKO
from taskgraph.util.docker import (
generate_context_hash,
create_context_tar,
)
from taskgraph.util.taskcluster import get_root_url
from taskgraph.util.schema import (
Schema,
)
@@ -26,8 +28,18 @@ from voluptuous import (
)
from .task import task_description_schema
logger = logging.getLogger(__name__)
CONTEXTS_DIR = 'docker-contexts'
DIGEST_RE = re.compile('^[0-9a-f]{64}$')
IMAGE_BUILDER_IMAGE = (
'taskcluster/image_builder:4.0.0'
"@sha256:"
"866c304445334703b68653e1390816012c9e6bdabfbd1906842b5b229e8ed044"
)
transforms = TransformSequence()
docker_image_schema = Schema({
@@ -101,6 +113,10 @@ def fill_template(config, tasks):
context_hashes = {}
if not taskgraph.fast and config.write_artifacts:
if not os.path.isdir(CONTEXTS_DIR):
os.makedirs(CONTEXTS_DIR)
for task in order_image_tasks(config, tasks):
image_name = task.pop('name')
job_symbol = task.pop('symbol')
@@ -114,30 +130,33 @@
raise Exception('Missing package job for {}-{}: {}'.format(
config.kind, image_name, p))
# Generating the context hash relies on arguments being set, so we
# set this now, although it's not the final value (it's a
# task-reference value, see further below). We add the package routes
# containing a hash to get the overall docker image hash, so changes
# to packages will be reflected in the docker image hash.
args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
for p in packages)
if parent:
args['DOCKER_IMAGE_PARENT'] = '{}:{}'.format(parent, context_hashes[parent])
args['TASKCLUSTER_ROOT_URL'] = get_root_url(False)
if not taskgraph.fast:
context_path = os.path.join('taskcluster', 'docker', definition)
context_hash = generate_context_hash(
GECKO, context_path, image_name, args)
if config.write_artifacts:
context_file = os.path.join(CONTEXTS_DIR, '{}.tar.gz'.format(image_name))
logger.info("Writing {} for docker image {}".format(context_file, image_name))
context_hash = create_context_tar(
GECKO, context_path,
context_file,
args)
else:
context_hash = generate_context_hash(
GECKO, context_path,
args)
else:
if config.write_artifacts:
raise Exception("Can't write artifacts if `taskgraph.fast` is set.")
context_hash = '0'*40
digest_data = [context_hash]
digest_data += [json.dumps(args, sort_keys=True)]
context_hashes[image_name] = context_hash
description = 'Build the docker image {} for use by dependent tasks'.format(
image_name)
args['DOCKER_IMAGE_PACKAGES'] = ' '.join('<{}>'.format(p)
for p in packages)
# Adjust the zstandard compression level based on the execution level.
# We use faster compression for level 1 because we care more about
# end-to-end times. We use slower/better compression for other levels
@@ -152,10 +171,7 @@ def fill_template(config, tasks):
'description': description,
'attributes': {'image_name': image_name},
'expires-after': '28 days' if config.params.is_try() else '1 year',
'scopes': [
'secrets:get:project/taskcluster/gecko/hgfingerprint',
'secrets:get:project/taskcluster/gecko/hgmointernal',
],
'scopes': [],
'treeherder': {
'symbol': job_symbol,
'platform': 'taskcluster-images/opt',
@@ -169,23 +185,24 @@
'os': 'linux',
'artifacts': [{
'type': 'file',
'path': '/builds/worker/workspace/artifacts/image.tar.zst',
'path': '/workspace/image.tar.zst',
'name': 'public/image.tar.zst',
}],
'env': {
'HG_STORE_PATH': '/builds/worker/checkouts/hg-store',
'CONTEXT_TASK_ID': {'task-reference': "<decision>"},
'CONTEXT_PATH': "public/docker-contexts/{}.tar.gz".format(image_name),
'HASH': context_hash,
'PROJECT': config.params['project'],
'IMAGE_NAME': image_name,
'DOCKER_IMAGE_ZSTD_LEVEL': zstd_level,
'DOCKER_BUILD_ARGS': {'task-reference': six.ensure_text(json.dumps(args))},
'GECKO_BASE_REPOSITORY': config.params['base_repository'],
'GECKO_HEAD_REPOSITORY': config.params['head_repository'],
'GECKO_HEAD_REV': config.params['head_rev'],
},
'chain-of-trust': True,
'docker-in-docker': True,
'taskcluster-proxy': True,
'max-run-time': 7200,
# FIXME: We aren't currently propagating the exit code
},
}
# Retry for 'funsize-update-generator' if exit status code is -1
@@ -194,45 +211,8 @@
worker = taskdesc['worker']
# We use the in-tree image_builder image to build docker images, but
# that can't be used to build the image_builder image itself,
# obviously. So we fall back to an image on docker hub, identified
# by hash. After the image-builder image is updated, it's best to push
# and update this hash as well, to keep image-builder builds up to date.
if image_name == 'image_builder':
hash = 'sha256:c6622fd3e5794842ad83d129850330b26e6ba671e39c58ee288a616a3a1c4c73'
worker['docker-image'] = 'taskcluster/image_builder@' + hash
# Keep in sync with the Dockerfile used to generate the
# docker image whose digest is referenced above.
worker['volumes'] = [
'/builds/worker/checkouts',
'/builds/worker/workspace',
]
cache_name = 'imagebuilder-v1'
else:
worker['docker-image'] = {'in-tree': 'image_builder'}
cache_name = 'imagebuilder-sparse-{}'.format(_run_task_suffix())
# Force images built against the in-tree image builder to
# have a different digest by adding a fixed string to the
# hashed data.
# Append to this data whenever the image builder's output behavior
# is changed, in order to force all downstream images to be rebuilt and
# cached distinctly.
digest_data.append('image_builder')
# Updated for squashing images in Bug 1527394
digest_data.append('squashing layers')
worker['caches'] = [{
'type': 'persistent',
'name': cache_name,
'mount-point': '/builds/worker/checkouts',
}]
for k, v in args.items():
if k == 'DOCKER_IMAGE_PACKAGES':
worker['env'][k] = {'task-reference': v}
else:
worker['env'][k] = v
worker['docker-image'] = IMAGE_BUILDER_IMAGE
digest_data.append("image-builder-image:{}".format(IMAGE_BUILDER_IMAGE))
if packages:
deps = taskdesc.setdefault('dependencies', {})
@@ -242,7 +222,7 @@
if parent:
deps = taskdesc.setdefault('dependencies', {})
deps['parent'] = 'build-docker-image-{}'.format(parent)
worker['env']['DOCKER_IMAGE_PARENT_TASK'] = {
worker['env']['PARENT_TASK_ID'] = {
'task-reference': '<parent>',
}
if 'index' in task:
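
Tying this transform to the Rust tool: DOCKER_BUILD_ARGS is set here to a JSON-encoded object, config.rs decodes it back into a map with its from_json deserializer, and build_image() in main.rs forwards each pair to kaniko as --build-arg. A sketch of that round trip with a hypothetical argument (assumes serde_json):

use std::collections::HashMap;

fn main() {
    // Hypothetical value, shaped like what this transform serializes.
    let raw = r#"{"DOCKER_IMAGE_PARENT":"parent:latest@sha256:0000aaaa"}"#;
    let args: HashMap<String, String> = serde_json::from_str(raw).unwrap();
    for (key, value) in &args {
        // Mirrors the --build-arg loop in build_image().
        println!("--build-arg {}={}", key, value);
    }
}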

View file

@@ -555,12 +555,9 @@ def build_docker_worker_payload(config, task, task_def):
if isinstance(worker.get('docker-image'), text_type):
out_of_tree_image = worker['docker-image']
run_task = run_task or out_of_tree_image.startswith(
'taskcluster/image_builder')
else:
out_of_tree_image = None
image = worker.get('docker-image', {}).get('in-tree')
run_task = run_task or image == 'image_builder'
if 'caches' in worker:
caches = {}