Import taskcluster config from application-services

Commit 496e5b54911fc4d2db2287e297e6391710353121 from github.com/mozilla/application-services
Author: Jan-Erik Rediger, 2019-09-30 10:28:02 +02:00
Parent: 6de05c1ae5
Commit: 1f92c3fb7e
15 changed files: 2541 additions and 0 deletions

.taskcluster.yml (new file, 215 lines)

@ -0,0 +1,215 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
version: 1
policy:
pullRequests: public
tasks:
$let:
decision_task_id: {$eval: as_slugid("decision_task")}
expires_in: {$fromNow: '1 year'}
scheduler_id: taskcluster-github
# We define the following variables at the very top, because they are used in the
# default definition
head_branch:
$if: 'tasks_for == "github-pull-request"'
then: ${event.pull_request.head.ref}
else:
$if: 'tasks_for == "github-push"'
then: ${event.ref}
else: ${event.release.target_commitish}
head_rev:
$if: 'tasks_for == "github-pull-request"'
then: ${event.pull_request.head.sha}
else:
$if: 'tasks_for == "github-push"'
then: ${event.after}
else: ${event.release.tag_name}
repository:
$if: 'tasks_for == "github-pull-request"'
then: ${event.pull_request.head.repo.html_url}
else: ${event.repository.html_url}
is_repo_trusted:
# Pull requests can't be trusted, even against the main repository, because anybody can open one without review
$if: 'tasks_for in ["github-push", "github-release"] && event.repository.html_url == "https://github.com/mozilla/application-services"'
then: true
else: false
user:
# dependabot-preview[bot]@users.noreply.github.com doesn't validate as email.
# It would be easier if TC simply didn't enforce an email format for "owner".
$if: 'event.sender.login == "dependabot-preview[bot]"'
then: dependabot
else: ${event.sender.login}
in:
$let:
images_worker_type:
$if: 'is_repo_trusted'
then: app-services-3-images
else: app-services-1-images
decision_worker_type:
$if: 'is_repo_trusted'
then: app-services-3-decision
else: app-services-1-decision
build_worker_type:
$if: 'is_repo_trusted'
then: app-services-3-b-linux
else: app-services-1-b-linux
# TODO: revisit once bug 1533314 is done to possibly infer better priorities
tasks_priority: highest
in:
$let:
default_task_definition:
taskId: ${decision_task_id}
taskGroupId: ${decision_task_id}
schedulerId: ${scheduler_id}
created: {$fromNow: ''}
deadline: {$fromNow: '4 hours'}
expires: ${expires_in}
provisionerId: aws-provisioner-v1
workerType: ${decision_worker_type}
priority: ${tasks_priority}
requires: all-completed
retries: 5
scopes:
- queue:create-task:${tasks_priority}:aws-provisioner-v1/${build_worker_type}
- queue:create-task:${tasks_priority}:aws-provisioner-v1/${images_worker_type}
- queue:route:statuses
- queue:route:notify.email.*
- queue:scheduler-id:${scheduler_id}
# So that we can cache task outputs for re-use.
- "queue:route:index.project.application-services.*"
# So that we can re-use Gradle/Cargo/sccache bits between tasks.
- "docker-worker:cache:application-services-*"
# So that we can fetch the macOS SDK from internal tooltool.
- project:releng:services/tooltool/api/download/internal
routes:
- statuses
metadata:
owner: &task_owner ${user}@users.noreply.github.com
source: &task_source ${repository}/raw/${head_rev}/.taskcluster.yml
extra:
tasks_for: ${tasks_for}
payload:
artifacts:
public/task-graph.json:
type: file
path: /repo/task-graph.json
expires: ${expires_in}
public/actions.json:
type: file
path: /repo/actions.json
expires: ${expires_in}
public/parameters.yml:
type: file
path: /repo/parameters.yml
expires: ${expires_in}
maxRunTime: {$eval: '20 * 60'}
# https://github.com/servo/taskcluster-bootstrap-docker-images#decision-task
image: "servobrowser/taskcluster-bootstrap:decision-task@sha256:28045b7ec0485ef363f8cb14f194008b47e9ede99f2ea40a1e945e921fce976e"
command: # TODO: servo decision-task image doesn't include pyyaml.
- /bin/bash
- --login
- -cx
- >-
python3 -m pip install --upgrade pip &&
python3 -m pip install pyyaml &&
git init repo &&
cd repo &&
git fetch --tags ${repository} ${head_branch} &&
git reset --hard ${head_rev} &&
python3 automation/taskcluster/decision_task.py
env:
APPSERVICES_HEAD_REPOSITORY: ${repository}
APPSERVICES_HEAD_BRANCH: ${head_branch}
APPSERVICES_HEAD_REV: ${head_rev}
BUILD_WORKER_TYPE: ${build_worker_type}
IMAGES_WORKER_TYPE: ${images_worker_type}
TASK_FOR: ${tasks_for}
TASK_OWNER: *task_owner
TASK_SOURCE: *task_source
features:
taskclusterProxy: true
in:
$match:
"tasks_for == 'github-pull-request' && event['action'] in ['opened', 'reopened', 'edited', 'synchronize']":
$let:
pull_request_title: ${event.pull_request.title}
pull_request_number: ${event.pull_request.number}
pull_request_url: ${event.pull_request.html_url}
in:
$mergeDeep:
- {$eval: 'default_task_definition'}
- payload:
env:
GITHUB_PR_TITLE: ${pull_request_title}
- metadata:
name: 'Application Services - Decision task (Pull Request #${pull_request_number})'
description: 'Building and testing Application Services - triggered by [#${pull_request_number}](${pull_request_url})'
"tasks_for == 'github-push' && head_branch == 'refs/heads/master'":
$mergeDeep:
- {$eval: 'default_task_definition'}
- metadata:
name: Application Services - Decision task (master)
description: Schedules the build and test tasks for Application Services.
"tasks_for == 'github-release' && event['action'] == 'published'":
$let:
is_staging:
$if: 'event.repository.html_url != "https://github.com/mozilla/application-services"'
then: true
else: false
in:
$let:
beetmover_worker_type:
$if: 'is_staging'
then: appsv-beetmover-dev
else: appsv-beetmover-v1
beetmover_bucket:
$if: 'is_staging'
then: maven-staging
else: maven-production
beetmover_bucket_public_url:
$if: 'is_staging'
then: https://maven-default.stage.mozaws.net/
else: https://maven.mozilla.org/
tag: ${event.release.tag_name}
release_task_definition:
payload:
features:
chainOfTrust: true
scopes:
# So that we can publish on Maven using beetmover
- project:mozilla:application-services:releng:beetmover:action:push-to-maven
in:
$mergeDeep:
- {$eval: 'default_task_definition'}
- {$eval: 'release_task_definition'}
- $if: 'is_staging'
then:
scopes:
- project:mozilla:application-services:releng:signing:cert:dep-signing
- queue:create-task:scriptworker-prov-v1/appsv-signing-dep-v1
else:
scopes:
- project:mozilla:application-services:releng:signing:cert:release-signing
- queue:create-task:scriptworker-prov-v1/appsv-signing-v1
# So that we can upload symbols to Socorro
- "secrets:get:project/application-services/symbols-token"
- payload:
env:
IS_STAGING: ${is_staging}
BEETMOVER_WORKER_TYPE: ${beetmover_worker_type}
BEETMOVER_BUCKET: ${beetmover_bucket}
BEETMOVER_BUCKET_PUBLIC_URL: ${beetmover_bucket_public_url}
- scopes:
# So that we can publish on Maven using beetmover
- project:mozilla:application-services:releng:beetmover:bucket:${beetmover_bucket}
- queue:create-task:${tasks_priority}:scriptworker-prov-v1/${beetmover_worker_type}
- metadata:
name: Application Services - Decision task (${tag})
description: Build and publish release versions.
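
For reference (not part of this commit): `.taskcluster.yml` is evaluated with Taskcluster's JSON-e templating (`$let`, `$if`, `$match`, `$mergeDeep`). A minimal sketch of previewing the result locally, assuming the `json-e` Python package (imported as `jsone`) and a fabricated pull-request event; `as_slugid` is normally supplied by the Taskcluster GitHub service, so a stand-in lambda is used here:

```python
# Illustrative sketch only: render .taskcluster.yml with JSON-e to see which
# decision task a pull-request event would produce. The event payload is fake.
import yaml
import jsone  # assumption: the json-e package is installed

with open(".taskcluster.yml") as f:
    template = yaml.safe_load(f)

fake_event = {
    "action": "opened",
    "sender": {"login": "example-user"},
    "repository": {"html_url": "https://github.com/mozilla/application-services"},
    "pull_request": {
        "title": "Example PR",
        "number": 1234,
        "html_url": "https://github.com/mozilla/application-services/pull/1234",
        "head": {
            "ref": "example-branch",
            "sha": "0" * 40,
            "repo": {"html_url": "https://github.com/example-user/application-services"},
        },
    },
}

rendered = jsone.render(template, context={
    "tasks_for": "github-pull-request",
    "event": fake_event,
    # as_slugid is provided by taskcluster-github in production; any stable
    # stand-in works for a local preview.
    "as_slugid": lambda name: "fake-slug-" + name,
})
for task in rendered["tasks"]:
    print(task["metadata"]["name"], "->", task["workerType"])
```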

automation/check_artifact_size.sh (new file, 34 lines)

@ -0,0 +1,34 @@
#!/usr/bin/env bash
#
# A simple check that our release .aar files are a reasonable size.
# If this fails then something has gone wrong with the build process,
# such as pulling in unwanted dependencies or failing to strip debug symbols.
set -eu
if [[ "$#" -ne 2 ]]
then
echo "Usage:"
echo "./automation/check_artifact_size.sh <buildDir> <artifactId>"
exit 1
fi
BUILD_DIR="$1"
ARTIFACT_ID="$2"
# Even our largest .aar should be less than 35M.
# Seems like a lot? They include compiled rust code for 4 architectures.
# We expect this size to decrease over time as we make changes to the way
# we perform megazord builds, but at least it's an upper bound for now...
LIMIT=36700160
if [[ -d "${BUILD_DIR}" ]]; then
while IFS= read -r -d '' AAR_FILE; do
SIZE=$(du -b "${AAR_FILE}" | cut -f 1)
if [[ "${SIZE}" -gt "${LIMIT}" ]]; then
echo "ERROR: Build artifact is unacceptably large." >&2
du -h "${AAR_FILE}" >&2
exit 1
fi
done < <(find "${BUILD_DIR}" -path "*/${ARTIFACT_ID}/*" -name "*.aar" -print0)
fi

automation/check_megazord.sh (new executable file, 32 lines)

@ -0,0 +1,32 @@
#!/usr/bin/env bash
set -euvx
if [[ "$#" -ne 1 ]]
then
echo "Usage:"
echo "./automation/check_megazord.sh <megazord_name>"
exit 1
fi
MEGAZORD_NAME=$1
# The `full-megazord` is `libmegazord.so`. Eventually we should figure out a way
# to avoid hardcoding this check, but for now it's not too bad.
if [[ "$MEGAZORD_NAME" = "full" ]]; then
MEGAZORD_NAME="megazord"
fi
# For now just check x86_64 since we only run this for PRs
TARGET_ARCHS=("x86_64") # "x86" "arm64" "arm")
NM_BINS=("x86_64-linux-android-nm") # "i686-linux-android-nm" "aarch64-linux-android-nm" "arm-linux-androideabi-nm")
RUST_TRIPLES=("x86_64-linux-android") # "i686-linux-android" "aarch64-linux-android" "armv7-linux-androideabi")
FORBIDDEN_SYMBOL="viaduct_detect_reqwest_backend"
for i in "${!TARGET_ARCHS[@]}"; do
NM="${ANDROID_NDK_TOOLCHAIN_DIR}/${TARGET_ARCHS[$i]}-${ANDROID_NDK_API_VERSION}/bin/${NM_BINS[$i]}"
MEGAZORD_PATH="./target/${RUST_TRIPLES[i]}/release/lib${MEGAZORD_NAME}.so"
printf '\nTesting if %s contains the legacy/test-only HTTP stack\n\n' "${MEGAZORD_PATH}"
# Returns error status on failure, which will cause us to exit because of set -e.
./testing/err-if-symbol.sh "$NM" "${MEGAZORD_PATH}" "${FORBIDDEN_SYMBOL}"
done
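
`testing/err-if-symbol.sh` is not shown in this diff; as a rough illustration (not the actual script), the check it performs amounts to failing when `nm` reports the forbidden symbol:

```python
# Hypothetical sketch of an "error if a forbidden symbol is present" check;
# the real check lives in testing/err-if-symbol.sh.
import subprocess
import sys

def err_if_symbol(nm, library, symbol):
    # List the dynamic symbols of the shared library using the NDK's nm binary.
    output = subprocess.run([nm, "-D", library], check=True,
                            capture_output=True, text=True).stdout
    if symbol in output:
        sys.exit("ERROR: forbidden symbol {} found in {}".format(symbol, library))

# Example invocation (paths are illustrative):
# err_if_symbol("x86_64-linux-android-nm",
#               "./target/x86_64-linux-android/release/libmegazord.so",
#               "viaduct_detect_reqwest_backend")
```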

automation/lint_bash_scripts.sh (new file, 9 lines)

@ -0,0 +1,9 @@
#!/usr/bin/env bash
if [[ ! -f "$PWD/automation/lint_bash_scripts.sh" ]]
then
echo "lint_bash_scripts.sh must be executed from the root directory."
exit 1
fi
find . -type f -name '*.sh' -print0 | xargs -0 shellcheck --external-sources


@ -0,0 +1,9 @@
[
{
"size": 8373536,
"visibility": "public",
"digest": "151fe6fbbd56410148b240da24128302059ff955e3bbd34747aa7bc89a8878e9a4477e1bdde444ccd268fc45b526eff28689ce301a8ae93d8049f9c0a8e7be4a",
"algorithm": "sha512",
"filename": "dump_syms"
}
]


@ -0,0 +1,2 @@
redo==2.0.2
requests==2.21.0

symbolstore.py (new file, 738 lines)

@ -0,0 +1,738 @@
#!/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Usage: symbolstore.py <params> <dump_syms path> <symbol store path>
# <debug info files or dirs>
# Runs dump_syms on each debug info file specified on the command line,
# then places the resulting symbol file in the proper directory
# structure in the symbol store path. Accepts multiple files
# on the command line, so can be called as part of a pipe using
# find <dir> | xargs symbolstore.py <dump_syms> <storepath>
# But really, you might just want to pass it <dir>.
#
# Parameters accepted:
# -c : Copy debug info files to the same directory structure
# as sym files. On Windows, this will also copy
# binaries into the symbol store.
# -a "<archs>" : Run dump_syms -a <arch> for each space separated
# cpu architecture in <archs> (only on OS X)
# -s <srcdir> : Use <srcdir> as the top source directory to
# generate relative filenames.
from __future__ import print_function
import errno
import sys
import platform
import io
import os
import re
import shutil
import textwrap
import subprocess
import time
import ctypes
from optparse import OptionParser
# Utility classes
class VCSFileInfo:
""" A base class for version-controlled file information. Ensures that the
following attributes are generated only once (successfully):
self.root
self.clean_root
self.revision
self.filename
The attributes are generated by a single call to the GetRoot,
GetRevision, and GetFilename methods. Those methods are explicitly not
implemented here and must be implemented in derived classes. """
def __init__(self, file):
if not file:
raise ValueError
self.file = file
def __getattr__(self, name):
""" __getattr__ is only called for attributes that are not set on self,
so setting self.[attr] will prevent future calls to the GetRoot,
GetRevision, and GetFilename methods. We don't set the values on
failure on the off chance that a future call might succeed. """
if name == "root":
root = self.GetRoot()
if root:
self.root = root
return root
elif name == "clean_root":
clean_root = self.GetCleanRoot()
if clean_root:
self.clean_root = clean_root
return clean_root
elif name == "revision":
revision = self.GetRevision()
if revision:
self.revision = revision
return revision
elif name == "filename":
filename = self.GetFilename()
if filename:
self.filename = filename
return filename
raise AttributeError
def GetRoot(self):
""" This method should return the unmodified root for the file or 'None'
on failure. """
raise NotImplementedError
def GetCleanRoot(self):
""" This method should return the repository root for the file or 'None'
on failure. """
raise NotImplementedError
def GetRevision(self):
""" This method should return the revision number for the file or 'None'
on failure. """
raise NotImplementedError
def GetFilename(self):
""" This method should return the repository-specific filename for the
file or 'None' on failure. """
raise NotImplementedError
# This regex finds out the org and the repo from a git remote URL.
githubRegex = re.compile(r'^(?:https://github.com/|git@github.com:)([^/]+)/([^/]+?)(?:.git)?$')
def read_output(*args):
(stdout, _) = subprocess.Popen(args=args, stdout=subprocess.PIPE).communicate()
return stdout.decode("utf-8").rstrip()
class GitHubRepoInfo:
"""
Info about a locally cloned Git repository that has its "origin" remote on GitHub.
"""
def __init__(self, path):
self.path = path
if 'APPSERVICES_HEAD_REPOSITORY' in os.environ:
remote_url = os.environ['APPSERVICES_HEAD_REPOSITORY']
else:
remote_url = read_output('git', '-C', path, 'remote', 'get-url', 'origin')
match = githubRegex.match(remote_url)
if match is None:
print(textwrap.dedent("""\
Could not determine repo info for %s (%s). This is probably because
the repo is not one that was cloned from a GitHub remote.""") % (path, remote_url), file=sys.stderr)
sys.exit(1)
(org, repo) = match.groups()
cleanroot = "github.com/%s/%s" % (org, repo)
# Try to get a tag if possible, otherwise get a git hash.
rev = None
p = subprocess.Popen(args=['git', '-C', path, 'name-rev', '--name-only', '--tags', 'HEAD', '--no-undefined'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, _) = p.communicate()
if p.returncode == 0:
rev = stdout.decode("utf-8").rstrip()
else:
rev = read_output('git', '-C', path, 'rev-parse', 'HEAD')
root = "https://raw.githubusercontent.com/%s/%s/%s/" % (org, repo, rev)
self.rev = rev
self.root = root
self.cleanroot = cleanroot
def GetFileInfo(self, file):
return GitFileInfo(file, self)
class GitFileInfo(VCSFileInfo):
def __init__(self, file, repo):
VCSFileInfo.__init__(self, file)
self.repo = repo
self.file = os.path.relpath(file, repo.path)
# Root is used for source server indexing
def GetRoot(self):
return self.repo.path
# Cleanroot is used for filenames
def GetCleanRoot(self):
return self.repo.cleanroot
def GetRevision(self):
return self.repo.rev
def GetFilename(self):
if self.revision and self.clean_root:
return "git:%s:%s:%s" % (self.clean_root, self.file, self.revision)
return self.file
# Utility functions
# A cache of files for which VCS info has already been determined. Used to
# prevent extra filesystem activity or process launching.
vcsFileInfoCache = {}
if platform.system() == 'Windows':
def normpath(path):
'''
Normalize a path using `GetFinalPathNameByHandleW` to get the
path with all components in the case they exist in on-disk, so
that making links to a case-sensitive server (hg.mozilla.org) works.
This function also resolves any symlinks in the path.
'''
# Return the original path if something fails, which can happen for paths that
# don't exist on this system (like paths from the CRT).
result = path
ctypes.windll.kernel32.SetErrorMode(ctypes.c_uint(1))
if not isinstance(path, unicode):
path = unicode(path, sys.getfilesystemencoding())
handle = ctypes.windll.kernel32.CreateFileW(path,
# GENERIC_READ
0x80000000,
# FILE_SHARE_READ
1,
None,
# OPEN_EXISTING
3,
# FILE_FLAG_BACKUP_SEMANTICS
# This is necessary to open
# directory handles.
0x02000000,
None)
if handle != -1:
size = ctypes.windll.kernel32.GetFinalPathNameByHandleW(handle,
None,
0,
0)
buf = ctypes.create_unicode_buffer(size)
if ctypes.windll.kernel32.GetFinalPathNameByHandleW(handle,
buf,
size,
0) > 0:
# The return value of GetFinalPathNameByHandleW uses the
# '\\?\' prefix.
result = buf.value.encode(sys.getfilesystemencoding())[4:]
ctypes.windll.kernel32.CloseHandle(handle)
return result
else:
# Just use the os.path version otherwise.
normpath = os.path.normpath
def IsInDir(file, dir):
# the lower() is to handle win32+vc8, where
# the source filenames come out all lowercase,
# but the srcdir can be mixed case
return os.path.abspath(file).lower().startswith(os.path.abspath(dir).lower())
def GetVCSFilenameFromSrcdir(file, srcdir):
if srcdir not in Dumper.srcdirRepoInfo:
# Not in cache, so find it and cache it
if os.path.isdir(os.path.join(srcdir, '.git')):
Dumper.srcdirRepoInfo[srcdir] = GitHubRepoInfo(srcdir)
else:
# Unknown VCS or file is not in a repo.
return None
return Dumper.srcdirRepoInfo[srcdir].GetFileInfo(file)
def GetVCSFilename(file, srcdirs):
"""Given a full path to a file, and the top source directory,
look for version control information about this file, and return
a tuple containing
1) a specially formatted filename that contains the VCS type,
VCS location, relative filename, and revision number, formatted like:
vcs:vcs location:filename:revision
For example:
cvs:cvs.mozilla.org/cvsroot:mozilla/browser/app/nsBrowserApp.cpp:1.36
2) the unmodified root information if it exists"""
(path, filename) = os.path.split(file)
if path == '' or filename == '':
return (file, None)
fileInfo = None
root = ''
if file in vcsFileInfoCache:
# Already cached this info, use it.
fileInfo = vcsFileInfoCache[file]
else:
for srcdir in srcdirs:
if not IsInDir(file, srcdir):
continue
fileInfo = GetVCSFilenameFromSrcdir(file, srcdir)
if fileInfo:
vcsFileInfoCache[file] = fileInfo
break
if fileInfo:
file = fileInfo.filename
root = fileInfo.root
# we want forward slashes on win32 paths
return (file.replace("\\", "/"), root)
def GetPlatformSpecificDumper(**kwargs):
"""This function simply returns a instance of a subclass of Dumper
that is appropriate for the current platform."""
return {'WINNT': Dumper_Win32,
'Linux': Dumper_Linux,
'Darwin': Dumper_Mac}[platform.system()](**kwargs)
# Git source indexing cargo culted from https://gist.github.com/baldurk/c6feb31b0305125c6d1a
def SourceIndex(fileStream, outputPath, vcs_root):
"""Takes a list of files, writes info to a data block in a .stream file"""
# Creates a .pdb.stream file in the mozilla\objdir to be used for source indexing
# Create the srcsrv data block that indexes the pdb file
result = True
pdbStreamFile = open(outputPath, "w")
pdbStreamFile.write('''SRCSRV: ini ------------------------------------------------\r\nVERSION=2\r\nINDEXVERSION=2\r\nVERCTRL=http\r\nSRCSRV: variables ------------------------------------------\r\nHTTP_ALIAS=''')
pdbStreamFile.write(vcs_root)
pdbStreamFile.write('''\r\nHTTP_EXTRACT_TARGET=%HTTP_ALIAS%/%var3%/%var2%\r\nSRCSRVTRG=%http_extract_target%\r\nSRCSRV: source files ---------------------------------------\r\n''')
pdbStreamFile.write(fileStream) # can't do string interpolation because the source server also uses this and so there are % in the above
pdbStreamFile.write("SRCSRV: end ------------------------------------------------\r\n\n")
pdbStreamFile.close()
return result
class Dumper:
"""This class can dump symbols from a file with debug info, and
store the output in a directory structure that is valid for use as
a Breakpad symbol server. Requires a path to a dump_syms binary--
|dump_syms| and a directory to store symbols in--|symbol_path|.
Optionally takes a list of processor architectures to process from
each debug file--|archs|, the full path to the top source
directory--|srcdir|, for generating relative source file names,
and an option to copy debug info files alongside the dumped
symbol files--|copy_debug|, mostly useful for creating a
Microsoft Symbol Server from the resulting output.
You don't want to use this directly if you intend to process files.
Instead, call GetPlatformSpecificDumper to get an instance of a
subclass."""
srcdirRepoInfo = {}
def __init__(self, dump_syms, symbol_path,
archs=None,
srcdirs=[],
copy_debug=False,
vcsinfo=False,
srcsrv=False,
file_mapping=None):
# popen likes absolute paths, at least on windows
self.dump_syms = os.path.abspath(dump_syms)
self.symbol_path = symbol_path
if archs is None:
# makes the loop logic simpler
self.archs = ['']
else:
self.archs = ['-a %s' % a for a in archs.split()]
# Any paths that get compared to source file names need to go through normpath.
self.srcdirs = [normpath(s) for s in srcdirs]
self.copy_debug = copy_debug
self.vcsinfo = vcsinfo
self.srcsrv = srcsrv
self.file_mapping = file_mapping or {}
# subclasses override this
def ShouldProcess(self, file):
return True
def RunFileCommand(self, file):
"""Utility function, returns the output of file(1)"""
# we use -L to read the targets of symlinks,
# and -b to print just the content, not the filename
return read_output('file', '-Lb', file)
# This is a no-op except on Win32
def SourceServerIndexing(self, debug_file, guid, sourceFileStream, vcs_root):
return ""
# subclasses override this if they want to support this
def CopyDebug(self, file, debug_file, guid, code_file, code_id):
pass
def Process(self, file_to_process):
"""Process the given file."""
if self.ShouldProcess(os.path.abspath(file_to_process)):
self.ProcessFile(file_to_process)
else:
print("Cannot process file %s. Skipping." % file_to_process)
def ProcessFile(self, file, dsymbundle=None):
"""Dump symbols from these files into a symbol file, stored
in the proper directory structure in |symbol_path|; processing is performed
asynchronously, and Finish must be called to wait for it to complete and clean up.
All files after the first are fallbacks in case the first file does not process
successfully; if it does, no other files will be touched."""
print("Beginning work for file: %s" % file, file=sys.stderr)
for arch_num, arch in enumerate(self.archs):
self.ProcessFileWork(file, arch_num, arch, None, dsymbundle)
def dump_syms_cmdline(self, file, arch, dsymbundle=None):
'''
Get the commandline used to invoke dump_syms.
'''
# The Mac dumper overrides this.
return [self.dump_syms, file]
def ProcessFileWork(self, file, arch_num, arch, vcs_root, dsymbundle=None):
t_start = time.time()
print("Processing file: %s" % file, file=sys.stderr)
sourceFileStream = ''
code_id, code_file = None, None
try:
cmd = self.dump_syms_cmdline(file, arch, dsymbundle=dsymbundle)
print(' '.join(cmd), file=sys.stderr)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=open(os.devnull, 'wb'))
stdout = io.TextIOWrapper(proc.stdout, encoding="utf-8")
module_line = stdout.readline()
if module_line.startswith("MODULE"):
# MODULE os cpu guid debug_file
(guid, debug_file) = (module_line.split())[3:5]
# strip off .pdb extensions, and append .sym
sym_file = re.sub(r"\.pdb$", "", debug_file) + ".sym"
# we do want forward slashes here
rel_path = os.path.join(debug_file,
guid,
sym_file).replace("\\", "/")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
try:
os.makedirs(os.path.dirname(full_path))
except OSError: # already exists
pass
f = open(full_path, "w")
f.write(module_line)
# now process the rest of the output
for line in stdout:
if line.startswith("FILE"):
# FILE index filename
(x, index, filename) = line.rstrip().split(None, 2)
# We want original file paths for the source server.
sourcepath = filename
filename = normpath(filename)
if filename in self.file_mapping:
filename = self.file_mapping[filename]
if self.vcsinfo:
(filename, rootname) = GetVCSFilename(filename, self.srcdirs)
# set vcs_root to the first non-empty rootname we see, so it survives files that report an empty one
if vcs_root is None:
if rootname:
vcs_root = rootname
# gather up files with git for indexing
if filename.startswith("git"):
(vcs, checkout, source_file, revision) = filename.split(":", 3)
# Contrary to HG we do not include the revision as it is part of the
# repo URL.
sourceFileStream += sourcepath + "*" + source_file + "\r\n"
f.write("FILE %s %s\n" % (index, filename))
elif line.startswith("INFO CODE_ID "):
# INFO CODE_ID code_id code_file
# This gives some info we can use to
# store binaries in the symbol store.
bits = line.rstrip().split(None, 3)
if len(bits) == 4:
code_id, code_file = bits[2:]
f.write(line)
else:
# pass through all other lines unchanged
f.write(line)
f.close()
retcode = proc.wait()
if retcode != 0:
raise RuntimeError(
"dump_syms failed with error code %d" % retcode)
# we output relative paths so callers can get a list of what
# was generated
print(rel_path)
if self.srcsrv and vcs_root:
# add source server indexing to the pdb file
self.SourceServerIndexing(debug_file, guid, sourceFileStream, vcs_root)
# only copy debug the first time if we have multiple architectures
if self.copy_debug and arch_num == 0:
self.CopyDebug(file, debug_file, guid,
code_file, code_id)
except StopIteration:
pass
except Exception as e:
print("Unexpected error: %s" % str(e), file=sys.stderr)
raise
if dsymbundle:
shutil.rmtree(dsymbundle)
elapsed = time.time() - t_start
print('Finished processing %s in %.2fs' % (file, elapsed),
file=sys.stderr)
# Platform-specific subclasses. For the most part, these just have
# logic to determine what files to extract symbols from.
def locate_pdb(path):
'''Given a path to a binary, attempt to locate the matching pdb file with simple heuristics:
* Look for a pdb file with the same base name next to the binary
* Look for a pdb file with the same base name in the cwd
Returns the path to the pdb file if it exists, or None if it could not be located.
'''
path, ext = os.path.splitext(path)
pdb = path + '.pdb'
if os.path.isfile(pdb):
return pdb
# If there's no pdb next to the file, see if there's a pdb with the same root name
# in the cwd. We build some binaries directly into dist/bin, but put the pdb files
# in the relative objdir, which is the cwd when running this script.
base = os.path.basename(pdb)
pdb = os.path.join(os.getcwd(), base)
if os.path.isfile(pdb):
return pdb
return None
class Dumper_Win32(Dumper):
fixedFilenameCaseCache = {}
def ShouldProcess(self, file):
"""This function will allow processing of exe or dll files that have pdb
files with the same base name next to them."""
if file.endswith(".exe") or file.endswith(".dll"):
if locate_pdb(file) is not None:
return True
return False
def CopyDebug(self, file, debug_file, guid, code_file, code_id):
file = locate_pdb(file)
def compress(path):
compressed_file = path[:-1] + '_'
# ignore makecab's output
makecab = os.environ['MAKECAB']
success = subprocess.call([makecab, "-D",
"CompressionType=MSZIP",
path, compressed_file],
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
if success == 0 and os.path.exists(compressed_file):
os.unlink(path)
return True
return False
rel_path = os.path.join(debug_file,
guid,
debug_file).replace("\\", "/")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
shutil.copyfile(file, full_path)
if compress(full_path):
print(rel_path[:-1] + '_')
else:
print(rel_path)
# Copy the binary file as well
if code_file and code_id:
full_code_path = os.path.join(os.path.dirname(file),
code_file)
if os.path.exists(full_code_path):
rel_path = os.path.join(code_file,
code_id,
code_file).replace("\\", "/")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
try:
os.makedirs(os.path.dirname(full_path))
except OSError as e:
if e.errno != errno.EEXIST:
raise
shutil.copyfile(full_code_path, full_path)
if compress(full_path):
print(rel_path[:-1] + '_')
else:
print(rel_path)
def SourceServerIndexing(self, debug_file, guid, sourceFileStream, vcs_root):
# Creates a .pdb.stream file in the mozilla\objdir to be used for source indexing
streamFilename = debug_file + ".stream"
stream_output_path = os.path.abspath(streamFilename)
# Call SourceIndex to create the .stream file
result = SourceIndex(sourceFileStream, stream_output_path, vcs_root)
if self.copy_debug:
pdbstr_path = os.environ.get("PDBSTR_PATH")
pdbstr = os.path.normpath(pdbstr_path)
subprocess.call([pdbstr, "-w", "-p:" + os.path.basename(debug_file),
"-i:" + os.path.basename(streamFilename), "-s:srcsrv"],
cwd=os.path.dirname(stream_output_path))
# clean up all the .stream files when done
os.remove(stream_output_path)
return result
class Dumper_Linux(Dumper):
objcopy = os.environ['OBJCOPY'] if 'OBJCOPY' in os.environ else 'objcopy'
def ShouldProcess(self, file):
"""This function will allow processing of files that are
executable, or end with the .so extension, and that
file(1) reports as being ELF files. It expects to find the file
command in PATH."""
if file.endswith(".so") or os.access(file, os.X_OK):
return self.RunFileCommand(file).startswith("ELF")
return False
def CopyDebug(self, file, debug_file, guid, code_file, code_id):
# We want to strip out the debug info, and add a
# .gnu_debuglink section to the object, so the debugger can
# actually load our debug info later.
# In some odd cases, the object might already have an irrelevant
# .gnu_debuglink section, and objcopy doesn't want to add one in
such cases, so we have it remove any existing one first.
file_dbg = file + ".dbg"
if subprocess.call([self.objcopy, '--only-keep-debug', file, file_dbg]) == 0 and \
subprocess.call([self.objcopy, '--remove-section', '.gnu_debuglink',
'--add-gnu-debuglink=%s' % file_dbg, file]) == 0:
rel_path = os.path.join(debug_file,
guid,
debug_file + ".dbg")
full_path = os.path.normpath(os.path.join(self.symbol_path,
rel_path))
shutil.move(file_dbg, full_path)
# gzip the shipped debug files
os.system("gzip -4 -f %s" % full_path)
print(rel_path + ".gz")
else:
if os.path.isfile(file_dbg):
os.unlink(file_dbg)
class Dumper_Mac(Dumper):
def ShouldProcess(self, file):
"""This function will allow processing of files that are
executable, or end with the .dylib extension, and that
file(1) reports as being Mach-O files. It expects to find the file
command in PATH."""
if file.endswith(".dylib") or os.access(file, os.X_OK):
return self.RunFileCommand(file).startswith("Mach-O")
return False
def ProcessFile(self, file):
print("Starting Mac pre-processing on file: %s" % file,
file=sys.stderr)
dsymbundle = self.GenerateDSYM(file)
if dsymbundle:
# kick off new jobs per-arch with our new list of files
Dumper.ProcessFile(self, file, dsymbundle=dsymbundle)
def dump_syms_cmdline(self, file, arch, dsymbundle=None):
'''
Get the commandline used to invoke dump_syms.
'''
# dump_syms wants the path to the original binary and the .dSYM
# in order to dump all the symbols.
if dsymbundle:
# This is the .dSYM bundle.
return [self.dump_syms] + arch.split() + ['-g', dsymbundle, file]
return Dumper.dump_syms_cmdline(self, file, arch)
def GenerateDSYM(self, file):
"""dump_syms on Mac needs to be run on a dSYM bundle produced
by dsymutil(1), so run dsymutil here and pass the bundle name
down to the superclass method instead."""
t_start = time.time()
print("Running Mac pre-processing on file: %s" % (file,),
file=sys.stderr)
dsymbundle = file + ".dSYM"
if os.path.exists(dsymbundle):
shutil.rmtree(dsymbundle)
# dsymutil takes --arch=foo instead of -a foo like everything else
try:
cmd = (["dsymutil"] +
[a.replace('-a ', '--arch=') for a in self.archs if a] +
[file])
print(' '.join(cmd), file=sys.stderr)
subprocess.check_call(cmd, stdout=open(os.devnull, 'w'))
except subprocess.CalledProcessError as e:
print('Error running dsymutil: %s' % str(e), file=sys.stderr)
raise
if not os.path.exists(dsymbundle):
# dsymutil won't produce a .dSYM for files without symbols
print("No symbols found in file: %s" % (file,), file=sys.stderr)
return False
elapsed = time.time() - t_start
print('Finished processing %s in %.2fs' % (file, elapsed),
file=sys.stderr)
return dsymbundle
def CopyDebug(self, file, debug_file, guid, code_file, code_id):
"""ProcessFile has already produced a dSYM bundle, so we should just
copy that to the destination directory. However, we'll package it
into a .tar.bz2 because the debug symbols are pretty huge, and
also because it's a bundle, so it's a directory. |file| here is
the original filename."""
dsymbundle = file + '.dSYM'
rel_path = os.path.join(debug_file,
guid,
os.path.basename(dsymbundle) + ".tar.bz2")
full_path = os.path.abspath(os.path.join(self.symbol_path,
rel_path))
success = subprocess.call(["tar", "cjf", full_path, os.path.basename(dsymbundle)],
cwd=os.path.dirname(dsymbundle),
stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
if success == 0 and os.path.exists(full_path):
print(rel_path)
# Entry point if called as a standalone program
def main():
parser = OptionParser(usage="usage: %prog [options] <dump_syms binary> <symbol store path> <debug info files>")
parser.add_option("-c", "--copy",
action="store_true", dest="copy_debug", default=False,
help="Copy debug info files into the same directory structure as symbol files")
parser.add_option("-a", "--archs",
action="store", dest="archs",
help="Run dump_syms -a <arch> for each space separated cpu architecture in ARCHS (only on OS X)")
parser.add_option("-s", "--srcdir",
action="append", dest="srcdir", default=[],
help="Use SRCDIR to determine relative paths to source files")
parser.add_option("-v", "--vcs-info",
action="store_true", dest="vcsinfo",
help="Try to retrieve VCS info for each FILE listed in the output")
parser.add_option("-i", "--source-index",
action="store_true", dest="srcsrv", default=False,
help="Add source index information to debug files, making them suitable for use in a source server.")
(options, args) = parser.parse_args()
#check to see if the pdbstr.exe exists
if options.srcsrv:
pdbstr = os.environ.get("PDBSTR_PATH")
if not os.path.exists(pdbstr):
print("Invalid path to pdbstr.exe - please set/check PDBSTR_PATH.\n", file=sys.stderr)
sys.exit(1)
if len(args) < 3:
parser.error("not enough arguments")
exit(1)
dumper = GetPlatformSpecificDumper(dump_syms=args[0],
symbol_path=args[1],
copy_debug=options.copy_debug,
archs=options.archs,
srcdirs=options.srcdir,
vcsinfo=options.vcsinfo,
srcsrv=options.srcsrv)
dumper.Process(args[2])
# run main if run directly
if __name__ == "__main__":
main()
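
For orientation (not part of symbolstore.py): the `rel_path` computed in `ProcessFileWork` follows the standard Breakpad symbol-store layout, `<debug_file>/<guid>/<debug_file minus .pdb>.sym`, taken from the MODULE line that dump_syms prints first. A standalone sketch:

```python
# Standalone illustration of the symbol path derived from a dump_syms MODULE line.
import posixpath
import re

def breakpad_sym_path(module_line):
    # A MODULE line looks like: "MODULE <os> <cpu> <guid> <debug_file>"
    guid, debug_file = module_line.split()[3:5]
    sym_file = re.sub(r"\.pdb$", "", debug_file) + ".sym"
    return posixpath.join(debug_file, guid, sym_file)

print(breakpad_sym_path("MODULE Linux x86_64 0123456789ABCDEF0123456789ABCDEF0 libmegazord.so"))
# -> libmegazord.so/0123456789ABCDEF0123456789ABCDEF0/libmegazord.so.sym
```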


@ -0,0 +1,81 @@
#!/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import redo
import requests
import shutil
import sys
import os
from optparse import OptionParser
DEFAULT_SYMBOL_URL = "https://symbols.mozilla.org/upload/"
MAX_RETRIES = 5
def Upload_Symbols(zip_file):
print("Uploading symbols file '{0}' to '{1}'".format(zip_file, DEFAULT_SYMBOL_URL), file=sys.stdout)
zip_name = os.path.basename(zip_file)
# Fetch the symbol server token from Taskcluster secrets.
secrets_url = "http://taskcluster/secrets/v1/secret/{}".format("project/application-services/symbols-token")
res = requests.get(secrets_url)
res.raise_for_status()
secret = res.json()
auth_token = secret["secret"]["token"]
if len(auth_token) == 0:
print("Failed to get the symbol token.", file=sys.stderr)
for i, _ in enumerate(redo.retrier(attempts=MAX_RETRIES), start=1):
print("Attempt %d of %d..." % (i, MAX_RETRIES))
try:
if zip_file.startswith("http"):
zip_arg = {"data": {"url": zip_file}}
else:
zip_arg = {"files": {zip_name: open(zip_file, 'rb')}}
r = requests.post(
DEFAULT_SYMBOL_URL,
headers={"Auth-Token": auth_token},
allow_redirects=False,
# Allow a longer read timeout because uploading by URL means the server
# has to fetch the entire zip file, which can take a while. The load balancer
# in front of symbols.mozilla.org has a 300 second timeout, so we'll use that.
timeout=(10, 300),
**zip_arg)
# 500 is likely to be a transient failure.
# Break out for success or other error codes.
if r.status_code < 500:
break
print("Error: {0}".format(r), file=sys.stderr)
except requests.exceptions.RequestException as e:
print("Error: {0}".format(e), file=sys.stderr)
print("Retrying...", file=sys.stdout)
else:
print("Maximum retries hit, giving up!", file=sys.stderr)
return False
if r.status_code >= 200 and r.status_code < 300:
print("Uploaded successfully", file=sys.stdout)
return True
print("Upload symbols failed: {0}".format(r), file=sys.stderr)
return False
def main():
parser = OptionParser(usage="usage: <symbol store path>")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("not enough arguments")
exit(1)
symbol_path = args[0]
shutil.make_archive(symbol_path, "zip", symbol_path)
Upload_Symbols(symbol_path + ".zip")
# run main if run directly
if __name__ == "__main__":
main()

automation/taskcluster/README.md (new file, 114 lines)

@ -0,0 +1,114 @@
# Testing Application Services on Taskcluster
## Taskcluster GitHub integration
Taskcluster is very flexible and not necessarily tied to GitHub,
but it does have an optional [GitHub integration service] that you can enable
on a repository [as a GitHub App].
When enabled, this service gets notified for every push, pull request, or GitHub release.
It then schedules some tasks based on reading [`.taskcluster.yml`] in the corresponding commit.
This file contains templates for creating one or more tasks,
but the logic it can support is fairly limited.
So a common pattern is to have it only run a single initial task called a *decision task*
that can have complex logic based on code and data in the repository
to build an arbitrary [task graph].
[GitHub integration service]: https://docs.taskcluster.net/docs/manual/using/github
[as a GitHub App]: https://github.com/apps/taskcluster
[`.taskcluster.yml`]: https://docs.taskcluster.net/docs/reference/integrations/taskcluster-github/docs/taskcluster-yml-v1
[task graph]: https://docs.taskcluster.net/docs/manual/using/task-graph
## Application Services decision task
This repository's [`.taskcluster.yml`][tc.yml] schedules a single task
that runs the Python 3 script [`automation/taskcluster/decision_task.py`](decision_task.py).
It is called a *decision task* as it is responsible for deciding what other tasks to schedule.
The Docker image that runs the decision task
is hosted on Docker Hub at [`servobrowser/taskcluster-bootstrap`][hub].
It is built by [Docker Hub automated builds] based on a `Dockerfile`
in the [`taskcluster-bootstrap-docker-images`] GitHub repository.
Hopefully, this image does not need to be modified often
as it only needs to clone the repository and run Python.
[tc.yml]: ../../.taskcluster.yml
[hub]: https://hub.docker.com/r/servobrowser/taskcluster-bootstrap/
[Docker Hub automated builds]: https://docs.docker.com/docker-hub/builds/
[`taskcluster-bootstrap-docker-images`]: https://github.com/servo/taskcluster-bootstrap-docker-images/
## In-tree Docker images
[Similar to Firefox][firefox], the Application Services decision task supports running other tasks in
Docker images built on-demand, based on `Dockerfile`s in the main repository. Modifying a
`Dockerfile` and relying on those new changes can be done in the same pull request or commit.
To avoid rebuilding images on every pull request,
they are cached based on a hash of the source `Dockerfile`.
For now, to support this hashing, we require `Dockerfile`s to be self-contained (with one exception).
Images are built without a [context],
so instructions like [`COPY`] cannot be used because there is nothing to copy from.
The exception is that the decision task adds support for a non-standard include directive:
when a `Dockerfile`'s first line is `% include` followed by a filename,
that line is replaced with the content of that file.
For example,
[`automation/taskcluster/docker/build.dockerfile`](docker/build.dockerfile) starts like so:
```Dockerfile
% include base.dockerfile
RUN \
apt-get install -qy --no-install-recommends \
# […]
```
[firefox]: https://firefox-source-docs.mozilla.org/taskcluster/taskcluster/docker-images.html
[context]: https://docs.docker.com/engine/reference/commandline/build/#extended-description
[`COPY`]: https://docs.docker.com/engine/reference/builder/#copy
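
As a rough sketch (hypothetical helper, not the decision task's actual code), expanding that directive before hashing or building a `Dockerfile` amounts to:
```python
# Hypothetical sketch of expanding the "% include <file>" directive described above.
import os

def expand_dockerfile(path):
    with open(path) as f:
        lines = f.readlines()
    if lines and lines[0].startswith("% include"):
        included = lines[0].split(None, 2)[2].strip()
        # Assumption: the included file is resolved next to the Dockerfile.
        with open(os.path.join(os.path.dirname(path), included)) as inc:
            lines[0] = inc.read()
    return "".join(lines)
```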
## Build artifacts
On Taskcluster with a decision task,
we can have a single build task save its resulting binary executable as an [artifact],
together with multiple testing tasks that each depend on the build task
(wait until it successfully finishes before they can start)
and start by downloading the artifact that was saved earlier.
The logic for all this is in [`decision_task.py`](decision_task.py)
and can be modified in any pull request.
[artifact]: https://docs.taskcluster.net/docs/manual/using/artifacts
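
An illustrative sketch of that wiring, written with the decisionlib-style helpers that [`decision_task.py`](decision_task.py) in this repository uses (`with_script`, `with_artifacts`, `with_curl_artifact_script`, `create`); the task names and commands below are made up:
```python
# Illustrative only: a build task publishes an artifact, and a test task
# downloads that artifact before running. Mirrors the pattern used by
# android_task()/libs_for() in decision_task.py.
from decision_task import linux_build_task  # assumes the decision-task environment

def example_build_and_test():
    build_task_id = (
        linux_build_task("Example: build")
        .with_script("cargo build --release && tar -czf /build/repo/target.tar.gz target")
        .with_artifacts("/build/repo/target.tar.gz")
        .create()
    )
    (
        linux_build_task("Example: test")
        # Download target.tar.gz produced by the build task (the same helper
        # android_task uses to consume the libs tasks' artifacts).
        .with_curl_artifact_script(build_task_id, "target.tar.gz")
        .with_script("tar -xzf target.tar.gz && cargo test --release")
        .create()
    )
```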
## Log artifacts
Taskcluster automatically saves the `stdio` output of a task as an artifact,
and has special support for viewing and streaming that output while the task is still running.
## Self-service, Bugzilla, and IRC
Taskcluster is designed to be “self-service” as much as possible,
with features like in-tree `.taskcluster.yml`
or the web UI for modifying the worker type definitions.
However, some changes, like adding a new worker type, still require Taskcluster admin access.
For those, file a request on Bugzilla under [Taskcluster :: Service Request][req].
To ask for help less formally, try the `#app-services` or `#rust-components` channels on Mozilla Slack.
[req]: https://bugzilla.mozilla.org/enter_bug.cgi?product=Taskcluster&component=Service%20Request
## Configuration recap
We try to keep as much of our Taskcluster configuration as possible in this repository.
To modify these files, submit a pull request:
* The [`.taskcluster.yml`][tc.yml] file,
for starting decision tasks in reaction to GitHub events
* The [`automation/taskcluster/decision_task.py`](decision_task.py) file,
defining what other tasks to schedule

build_config.py (new file, 89 lines)

@ -0,0 +1,89 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
import itertools
import os
from enum import Enum
import yaml
cached_build_config = None
def read_build_config():
global cached_build_config
if cached_build_config is None:
with open(os.path.join(os.path.dirname(__file__), '..', '..', '.buildconfig-android.yml'), 'rb') as f:
cached_build_config = yaml.safe_load(f)
return cached_build_config
class PublicationType(Enum):
AAR = 'aar'
JAR = 'jar'
class Publication:
def __init__(self, name: str, publication_type: PublicationType, version: str, project_path: str):
self._name = name
self._type = publication_type
self._version = version
self._project_path = project_path
def to_artifacts(self, extensions):
primary_extensions = ('.pom', '.aar', '-sources.jar') if self._type == PublicationType.AAR else ('.pom', '.jar')
extensions = [package_ext + digest_ext for package_ext, digest_ext in
itertools.product(primary_extensions, extensions)]
artifacts = []
for extension in extensions:
artifact_filename = '{}-{}{}'.format(self._name, self._version, extension)
filename_with_package = f'org/mozilla/appservices/{self._name}/{self._version}/{artifact_filename}'
artifacts.append({
'taskcluster_path': f'public/build/{artifact_filename}',
'build_fs_path': f'{self._project_path}/build/maven/{filename_with_package}',
'maven_destination': f'maven2/org/mozilla/appservices/{self._name}/{self._version}/{artifact_filename}'
})
return artifacts
def module_definitions():
build_config = read_build_config()
version = build_config['libraryVersion']
modules_defs = []
for (name, project) in build_config['projects'].items():
project_path = '/build/repo/{}'.format(project['path'])
module_artifacts = []
for artifact in project['publications']:
artifact_name = artifact['name']
artifact_type = PublicationType(artifact['type'])
extensions = ('.pom', '.aar', '-sources.jar') if artifact_type == PublicationType.AAR else ('.pom', '.jar')
extensions = [package_ext + digest_ext for package_ext, digest_ext in itertools.product(extensions, ('', '.sha1', '.md5'))]
for extension in extensions:
artifact_filename = '{}-{}{}'.format(artifact_name, version, extension)
filename_with_package = f'org/mozilla/appservices/{artifact_name}/{version}/{artifact_filename}'
module_artifacts.append({
'taskcluster_path': f'public/build/{artifact_filename}',
'build_fs_path': f'{project_path}/build/maven/{filename_with_package}',
'maven_destination': f'maven2/org/mozilla/appservices/{artifact_name}/{version}/{artifact_filename}'
})
modules_defs.append({
'name': name,
'publications': [Publication(publication['name'], PublicationType(publication['type']), version, project_path)
for publication in project['publications']],
'artifacts': module_artifacts,
'uploadSymbols': project.get('uploadSymbols', False),
'path': project['path'],
})
return modules_defs
def appservices_version():
build_config = read_build_config()
return build_config['libraryVersion']
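
For illustration (not part of this file): what `Publication.to_artifacts` yields for a hypothetical AAR publication; the component name, version, and project path below are made up.

```python
# Illustrative usage of Publication.to_artifacts; all values are made up.
pub = Publication("example-component", PublicationType.AAR, "0.1.0",
                  "/build/repo/components/example/android")
for artifact in pub.to_artifacts(("", ".sha1", ".md5")):
    print(artifact["taskcluster_path"], "->", artifact["maven_destination"])
# First line printed:
# public/build/example-component-0.1.0.pom -> maven2/org/mozilla/appservices/example-component/0.1.0/example-component-0.1.0.pom
```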

automation/taskcluster/decision_task.py (new file, 396 lines)

@ -0,0 +1,396 @@
# coding: utf8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import enum
from enum import Enum
import os.path
from build_config import module_definitions, appservices_version
from decisionlib import *
from decisionlib import SignTask
# Tags that, when present in a pull request's title, alter the CI tasks we run.
FULL_CI_TAG = '[ci full]'
SKIP_CI_TAG = '[ci skip]'
# Task owners for which we always run full CI. Typically bots.
FULL_CI_GH_USERS = ['dependabot@users.noreply.github.com']
def main(task_for):
if task_for == "github-pull-request":
task_owner = os.environ["TASK_OWNER"]
pr_title = os.environ["GITHUB_PR_TITLE"]
if SKIP_CI_TAG in pr_title:
print("CI skip requested, exiting.")
exit(0)
elif FULL_CI_TAG in pr_title or task_owner in FULL_CI_GH_USERS:
android_multiarch()
else:
android_linux_x86_64()
elif task_for == "github-push":
android_multiarch()
elif task_for == "github-release":
is_staging = os.environ['IS_STAGING'] == 'true'
android_multiarch_release(is_staging)
else:
raise ValueError("Unrecognized $TASK_FOR value: %r" % task_for)
full_task_graph = build_full_task_graph()
populate_chain_of_trust_task_graph(full_task_graph)
populate_chain_of_trust_required_but_unused_files()
build_artifacts_expire_in = "1 month"
build_dependencies_artifacts_expire_in = "3 month"
log_artifacts_expire_in = "1 year"
build_env = {
"RUST_BACKTRACE": "1",
"RUSTFLAGS": "-Dwarnings",
"CARGO_INCREMENTAL": "0",
"CI": "1",
}
linux_build_env = {
"TERM": "dumb", # Keep Gradle output sensible.
"CCACHE": "sccache",
"RUSTC_WRAPPER": "sccache",
"SCCACHE_IDLE_TIMEOUT": "1200",
"SCCACHE_CACHE_SIZE": "40G",
"SCCACHE_ERROR_LOG": "/build/sccache.log",
"RUST_LOG": "sccache=info",
}
# Calls the "$PLATFORM_libs" functions and returns
# their task IDs.
def libs_for(deploy_environment, *platforms):
return list(map(lambda p: globals()[p + "_libs"](deploy_environment), platforms))
def android_libs(deploy_environment):
task = (
linux_build_task("Android libs (all architectures): build")
.with_script("""
pushd libs
./build-all.sh android
popd
tar -czf /build/repo/target.tar.gz libs/android
""")
.with_artifacts(
"/build/repo/target.tar.gz",
)
)
if deploy_environment == DeployEnvironment.NONE:
return task.find_or_create("build.libs.android." + CONFIG.git_sha_for_directory("libs"))
else:
return task.create()
def desktop_linux_libs(deploy_environment):
task = (
linux_build_task("Desktop libs (Linux): build")
.with_script("""
pushd libs
./build-all.sh desktop
popd
tar -czf /build/repo/target.tar.gz libs/desktop
""")
.with_artifacts(
"/build/repo/target.tar.gz",
)
)
if deploy_environment == DeployEnvironment.NONE:
return task.find_or_create("build.libs.desktop.linux." + CONFIG.git_sha_for_directory("libs"))
else:
return task.create()
def desktop_macos_libs(deploy_environment):
task = (
linux_cross_compile_build_task("Desktop libs (macOS): build")
.with_script("""
pushd libs
./build-all.sh darwin
popd
tar -czf /build/repo/target.tar.gz libs/desktop
""")
.with_artifacts(
"/build/repo/target.tar.gz",
)
)
if deploy_environment == DeployEnvironment.NONE:
return task.find_or_create("build.libs.desktop.macos." + CONFIG.git_sha_for_directory("libs"))
else:
return task.create()
def desktop_win32_x86_64_libs(deploy_environment):
task = (
linux_build_task("Desktop libs (win32-x86-64): build")
.with_script("""
pushd libs
./build-all.sh win32-x86-64
popd
tar -czf /build/repo/target.tar.gz libs/desktop
""")
.with_artifacts(
"/build/repo/target.tar.gz",
)
)
if deploy_environment == DeployEnvironment.NONE:
return task.find_or_create("build.libs.desktop.win32-x86-64." + CONFIG.git_sha_for_directory("libs"))
else:
return task.create()
def android_task(task_name, libs_tasks):
task = linux_cross_compile_build_task(task_name)
for libs_task in libs_tasks:
task.with_curl_artifact_script(libs_task, "target.tar.gz")
task.with_script("tar -xzf target.tar.gz")
return task
def ktlint_detekt():
linux_build_task("detekt").with_script("./gradlew --no-daemon clean detekt").create()
linux_build_task("ktlint").with_script("./gradlew --no-daemon clean ktlint").create()
def android_linux_x86_64():
ktlint_detekt()
libs_tasks = libs_for(DeployEnvironment.NONE, "android", "desktop_linux", "desktop_macos", "desktop_win32_x86_64")
task = (
android_task("Build and test (Android - linux-x86-64)", libs_tasks)
.with_script("""
echo "rust.targets=linux-x86-64,x86_64\n" > local.properties
""")
.with_script("""
yes | sdkmanager --update
yes | sdkmanager --licenses
./gradlew --no-daemon clean
./gradlew --no-daemon assembleDebug
./gradlew --no-daemon testDebug
""")
)
for module_info in module_definitions():
module = module_info['name']
if module.endswith("-megazord"):
task.with_script("./automation/check_megazord.sh {}".format(module[0:-9].replace("-", "_")))
return task.create()
def gradle_module_task_name(module, gradle_task_name):
return ":%s:%s" % (module, gradle_task_name)
def gradle_module_task(libs_tasks, module_info, deploy_environment):
module = module_info['name']
task = android_task("{} - Build and test".format(module), libs_tasks)
# This is important as by default the Rust plugin will only cross-compile for Android + host platform.
task.with_script('echo "rust.targets=arm,arm64,x86_64,x86,darwin,linux-x86-64,win32-x86-64-gnu\n" > local.properties')
(
task
.with_script("""
yes | sdkmanager --update
yes | sdkmanager --licenses
./gradlew --no-daemon clean
""")
.with_script("./gradlew --no-daemon {}".format(gradle_module_task_name(module, "testDebug")))
.with_script("./gradlew --no-daemon {}".format(gradle_module_task_name(module, "assembleRelease")))
.with_script("./gradlew --no-daemon {}".format(gradle_module_task_name(module, "publish")))
.with_script("./gradlew --no-daemon {}".format(gradle_module_task_name(module, "checkMavenArtifacts")))
)
for publication in module_info['publications']:
for artifact in publication.to_artifacts(('', '.sha1', '.md5')):
task.with_artifacts(artifact['build_fs_path'], artifact['taskcluster_path'])
if deploy_environment == DeployEnvironment.RELEASE and module_info['uploadSymbols']:
task.with_scopes("secrets:get:project/application-services/symbols-token")
task.with_script("./automation/upload_android_symbols.sh {}".format(module_info['path']))
return task.create()
def build_gradle_modules_tasks(deploy_environment):
libs_tasks = libs_for(deploy_environment, "android", "desktop_linux", "desktop_macos", "desktop_win32_x86_64")
module_build_tasks = {}
for module_info in module_definitions():
module_build_tasks[module_info['name']] = gradle_module_task(libs_tasks, module_info, deploy_environment)
return module_build_tasks
def android_multiarch():
ktlint_detekt()
build_gradle_modules_tasks(DeployEnvironment.NONE)
def android_multiarch_release(is_staging):
module_build_tasks = build_gradle_modules_tasks(DeployEnvironment.STAGING_RELEASE if is_staging else DeployEnvironment.RELEASE)
version = appservices_version()
bucket_name = os.environ['BEETMOVER_BUCKET']
bucket_public_url = os.environ['BEETMOVER_BUCKET_PUBLIC_URL']
for module_info in module_definitions():
module = module_info['name']
build_task = module_build_tasks[module]
sign_task = (
SignTask("Sign Android module: {}".format(module))
.with_description("Signs module")
.with_worker_type("appsv-signing-dep-v1" if is_staging else "appsv-signing-v1")
# We want to make sure ALL builds succeeded before doing a release.
.with_dependencies(*module_build_tasks.values())
.with_upstream_artifact({
"paths": [artifact["taskcluster_path"]
for publication in module_info["publications"]
for artifact in publication.to_artifacts(('',))],
"formats": ["autograph_gpg"],
"taskId": build_task,
"taskType": "build"
})
.with_scopes(
"project:mozilla:application-services:releng:signing:cert:{}-signing".format(
"dep" if is_staging else "release")
)
.create()
)
(
BeetmoverTask("Publish Android module: {} via beetmover".format(module))
.with_description("Publish release module {} to {}".format(module, bucket_public_url))
.with_worker_type(os.environ['BEETMOVER_WORKER_TYPE'])
.with_dependencies(sign_task)
.with_upstream_artifact({
"paths": [artifact['taskcluster_path']
for publication in module_info["publications"]
for artifact in publication.to_artifacts(('', '.sha1', '.md5'))],
"taskId": build_task,
"taskType": "build",
})
.with_upstream_artifact({
"paths": [artifact['taskcluster_path']
for publication in module_info["publications"]
for artifact in publication.to_artifacts(('.asc',))],
"taskId": sign_task,
"taskType": "signing",
})
.with_app_name("appservices")
.with_artifact_map([{
"locale": "en-US",
"taskId": build_task,
"paths": {
artifact["taskcluster_path"]: {
"checksums_path": "", # TODO beetmover marks this as required, but it's not needed
"destinations": [artifact["maven_destination"]],
}
for publication in module_info["publications"]
for artifact in publication.to_artifacts(('', '.sha1', '.md5'))
}
}, {
"locale": "en-US",
"taskId": sign_task,
"paths": {
artifact["taskcluster_path"]: {
"checksums_path": "", # TODO beetmover marks this as required, but it's not needed
"destinations": [artifact["maven_destination"]],
}
for publication in module_info["publications"]
for artifact in publication.to_artifacts(('.asc',))
},
}])
.with_app_version(version)
.with_scopes(
"project:mozilla:application-services:releng:beetmover:bucket:{}".format(bucket_name),
"project:mozilla:application-services:releng:beetmover:action:push-to-maven"
)
.with_routes("notify.email.a-s-ci-failures@mozilla.com.on-failed")
.create()
)
def dockerfile_path(name):
return os.path.join(os.path.dirname(__file__), "docker", name + ".dockerfile")
def linux_task(name):
task = (
DockerWorkerTask(name)
.with_worker_type(os.environ.get("BUILD_WORKER_TYPE"))
)
if os.environ["TASK_FOR"] == "github-release":
task.with_features("chainOfTrust")
return task
def linux_build_task(name):
use_indexed_docker_image = os.environ["TASK_FOR"] != "github-release"
task = (
linux_task(name)
# https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/caches
.with_scopes("docker-worker:cache:application-services-*")
.with_caches(**{
"application-services-cargo-registry": "/root/.cargo/registry",
"application-services-cargo-git": "/root/.cargo/git",
"application-services-sccache": "/root/.cache/sccache",
"application-services-gradle": "/root/.gradle",
"application-services-rustup": "/root/.rustup",
"application-services-android-ndk-toolchain": "/root/.android-ndk-r15c-toolchain",
})
.with_index_and_artifacts_expire_in(build_artifacts_expire_in)
.with_artifacts("/build/sccache.log")
.with_max_run_time_minutes(120)
.with_dockerfile(dockerfile_path("build"), use_indexed_docker_image)
.with_env(**build_env, **linux_build_env)
.with_script("""
rustup toolchain install stable
rustup default stable
rustup target add x86_64-linux-android i686-linux-android armv7-linux-androideabi aarch64-linux-android
""")
.with_script("""
test -d $ANDROID_NDK_TOOLCHAIN_DIR/arm-$ANDROID_NDK_API_VERSION || $ANDROID_NDK_ROOT/build/tools/make_standalone_toolchain.py --arch="arm" --api="$ANDROID_NDK_API_VERSION" --install-dir="$ANDROID_NDK_TOOLCHAIN_DIR/arm-$ANDROID_NDK_API_VERSION" --deprecated-headers --force
test -d $ANDROID_NDK_TOOLCHAIN_DIR/arm64-$ANDROID_NDK_API_VERSION || $ANDROID_NDK_ROOT/build/tools/make_standalone_toolchain.py --arch="arm64" --api="$ANDROID_NDK_API_VERSION" --install-dir="$ANDROID_NDK_TOOLCHAIN_DIR/arm64-$ANDROID_NDK_API_VERSION" --deprecated-headers --force
test -d $ANDROID_NDK_TOOLCHAIN_DIR/x86-$ANDROID_NDK_API_VERSION || $ANDROID_NDK_ROOT/build/tools/make_standalone_toolchain.py --arch="x86" --api="$ANDROID_NDK_API_VERSION" --install-dir="$ANDROID_NDK_TOOLCHAIN_DIR/x86-$ANDROID_NDK_API_VERSION" --deprecated-headers --force
test -d $ANDROID_NDK_TOOLCHAIN_DIR/x86_64-$ANDROID_NDK_API_VERSION || $ANDROID_NDK_ROOT/build/tools/make_standalone_toolchain.py --arch="x86_64" --api="$ANDROID_NDK_API_VERSION" --install-dir="$ANDROID_NDK_TOOLCHAIN_DIR/x86_64-$ANDROID_NDK_API_VERSION" --deprecated-headers --force
""")
.with_repo()
.with_script("""
./libs/verify-android-environment.sh
""")
)
# Send email notifications for failures on master.
if os.environ["TASK_FOR"] == "github-push":
task.with_routes("notify.email.a-s-ci-failures@mozilla.com.on-failed")
return task
def linux_cross_compile_build_task(name):
return (
linux_build_task(name)
.with_scopes('project:releng:services/tooltool/api/download/internal')
.with_features('taskclusterProxy') # So we can fetch from tooltool.
.with_script("""
rustup target add x86_64-apple-darwin
pushd libs
./cross-compile-macos-on-linux-desktop-libs.sh
popd
# Rust requires dsymutil on the PATH: https://github.com/rust-lang/rust/issues/52728.
export PATH=$PATH:/tmp/clang/bin
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_NSS_STATIC=1
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_NSS_DIR=/build/repo/libs/desktop/darwin/nss
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_SQLCIPHER_LIB_DIR=/build/repo/libs/desktop/darwin/sqlcipher/lib
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_CC=/tmp/clang/bin/clang
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_TOOLCHAIN_PREFIX=/tmp/cctools/bin
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_AR=/tmp/cctools/bin/x86_64-darwin11-ar
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_RANLIB=/tmp/cctools/bin/x86_64-darwin11-ranlib
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_LD_LIBRARY_PATH=/tmp/clang/lib
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_RUSTFLAGS="-C linker=/tmp/clang/bin/clang -C link-arg=-B -C link-arg=/tmp/cctools/bin -C link-arg=-target -C link-arg=x86_64-darwin11 -C link-arg=-isysroot -C link-arg=/tmp/MacOSX10.11.sdk -C link-arg=-Wl,-syslibroot,/tmp/MacOSX10.11.sdk -C link-arg=-Wl,-dead_strip"
# For ring's use of `cc`.
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_APPLE_DARWIN_CFLAGS_x86_64_apple_darwin="-B /tmp/cctools/bin -target x86_64-darwin11 -isysroot /tmp/MacOSX10.11.sdk -Wl,-syslibroot,/tmp/MacOSX10.11.sdk -Wl,-dead_strip"
rustup target add x86_64-pc-windows-gnu
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_PC_WINDOWS_GNU_RUSTFLAGS="-C linker=x86_64-w64-mingw32-gcc"
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_PC_WINDOWS_GNU_AR=x86_64-w64-mingw32-ar
export ORG_GRADLE_PROJECT_RUST_ANDROID_GRADLE_TARGET_X86_64_PC_WINDOWS_GNU_CC=x86_64-w64-mingw32-gcc
""")
)
CONFIG.task_name_template = "Application Services - %s"
CONFIG.index_prefix = "project.application-services.application-services"
CONFIG.docker_images_expire_in = build_dependencies_artifacts_expire_in
CONFIG.repacked_msi_files_expire_in = build_dependencies_artifacts_expire_in
class DeployEnvironment(enum.Enum):
RELEASE = enum.auto()
STAGING_RELEASE = enum.auto()
NONE = enum.auto()
if __name__ == "__main__": # pragma: no cover
main(task_for=os.environ["TASK_FOR"])


@@ -0,0 +1,564 @@
# coding: utf8
# Copyright 2018 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
Project-independent library for Taskcluster decision tasks
"""
import base64
import datetime
import hashlib
import json
import os
import re
import subprocess
import sys
from typing import List
import taskcluster
# Public API
__all__ = [
"CONFIG", "SHARED",
"Task", "DockerWorkerTask", "BeetmoverTask",
"build_full_task_graph", "populate_chain_of_trust_required_but_unused_files",
"populate_chain_of_trust_task_graph",
]
class Config:
"""
Global configuration, for users of the library to modify.
"""
def __init__(self):
self.task_name_template = "%s"
self.index_prefix = "garbage.application-services-decisionlib"
self.scopes_for_all_subtasks = []
self.routes_for_all_subtasks = []
self.docker_images_expire_in = "1 month"
self.repacked_msi_files_expire_in = "1 month"
# Set by docker-worker:
# https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/environment
self.decision_task_id = os.environ.get("TASK_ID")
# Set in the decision tasks payload, such as defined in .taskcluster.yml
self.task_owner = os.environ.get("TASK_OWNER")
self.task_source = os.environ.get("TASK_SOURCE")
self.build_worker_type = os.environ.get("BUILD_WORKER_TYPE")
self.images_worker_type = os.environ.get("IMAGES_WORKER_TYPE")
self.git_url = os.environ.get("APPSERVICES_HEAD_REPOSITORY")
self.git_ref = os.environ.get("APPSERVICES_HEAD_BRANCH")
self.git_sha = os.environ.get("APPSERVICES_HEAD_REV")
# Map directory string to git sha for that directory.
self._git_sha_for_directory = {}
def git_sha_is_current_head(self):
output = subprocess.check_output(["git", "rev-parse", "HEAD"])
self.git_sha = output.decode("utf8").strip()
def git_sha_for_directory(self, directory):
try:
return self._git_sha_for_directory[directory]
except KeyError:
output = subprocess.check_output(["git", "rev-parse", "HEAD:{}".format(directory)])
sha = output.decode("utf8").strip()
self._git_sha_for_directory[directory] = sha
return sha
class Shared:
"""
Global shared state.
"""
def __init__(self):
self.now = datetime.datetime.utcnow()
self.tasks_cache = {}
self.found_or_created_indexed_tasks = {}
self.all_tasks = []
# taskclusterProxy URLs:
# https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/features
self.queue_service = taskcluster.Queue(options={"baseUrl": "http://taskcluster/queue/v1/"})
self.index_service = taskcluster.Index(options={"baseUrl": "http://taskcluster/index/v1/"})
def from_now_json(self, offset):
"""
Same as `taskcluster.fromNowJSON`, but uses the creation time of `self` as the reference "now".
"""
return taskcluster.stringDate(taskcluster.fromNow(offset, dateObj=self.now))
def schedule_task(self, taskId, taskDefinition):
# print(json.dumps(taskDefinition, indent=4, separators=(',', ': ')))
self.queue_service.createTask(taskId, taskDefinition)
print("Scheduled %s" % taskDefinition['metadata']['name'])
self.all_tasks.append(taskId)
def build_task_graph(self):
full_task_graph = {}
# TODO: Switch to async python to speed up submission
for task_id in self.all_tasks:
full_task_graph[task_id] = {
'task': SHARED.queue_service.task(task_id),
}
return full_task_graph
CONFIG = Config()
SHARED = Shared()
def chaining(op, attr):
def method(self, *args, **kwargs):
op(self, attr, *args, **kwargs)
return self
return method
def append_to_attr(self, attr, *args): getattr(self, attr).extend(args)
def prepend_to_attr(self, attr, *args): getattr(self, attr)[0:0] = list(args)
def update_attr(self, attr, **kwargs): getattr(self, attr).update(kwargs)
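# For example, `with_scopes = chaining(append_to_attr, "scopes")` (see `Task` below) behaves like:
#
#     def with_scopes(self, *scopes):
#         self.scopes.extend(scopes)
#         return self
#
# which is what makes calls such as `task.with_scopes(...).with_routes(...)` chainable.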
def build_full_task_graph():
return SHARED.build_task_graph()
class Task:
"""
A task definition, waiting to be created.
Typical usage is to chain the `with_*` methods to set or extend this object's attributes,
then call the `create` or `find_or_create` method to schedule a task.
This is an abstract class that needs to be specialized for different worker implementations.
"""
def __init__(self, name):
self.name = name
self.description = ""
self.scheduler_id = "taskcluster-github"
self.provisioner_id = "aws-provisioner-v1"
self.worker_type = "github-worker"
self.deadline_in = "1 day"
self.expires_in = "1 year"
self.index_and_artifacts_expire_in = self.expires_in
self.dependencies = []
self.scopes = []
self.routes = []
self.extra = {}
# All `with_*` methods return `self`, so multiple method calls can be chained.
with_description = chaining(setattr, "description")
with_scheduler_id = chaining(setattr, "scheduler_id")
with_provisioner_id = chaining(setattr, "provisioner_id")
with_worker_type = chaining(setattr, "worker_type")
with_deadline_in = chaining(setattr, "deadline_in")
with_expires_in = chaining(setattr, "expires_in")
with_index_and_artifacts_expire_in = chaining(setattr, "index_and_artifacts_expire_in")
with_dependencies = chaining(append_to_attr, "dependencies")
with_scopes = chaining(append_to_attr, "scopes")
with_routes = chaining(append_to_attr, "routes")
with_extra = chaining(update_attr, "extra")
def build_worker_payload(self): # pragma: no cover
"""
Overridden by sub-classes to return a dictionary in a worker-specific format,
which is used as the `payload` property in a task definition request
passed to the Queue's `createTask` API.
<https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#createTask>
"""
raise NotImplementedError
def create(self):
"""
Call the Queue's `createTask` API to schedule a new task, and return its ID.
<https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#createTask>
"""
worker_payload = self.build_worker_payload()
assert CONFIG.decision_task_id
assert CONFIG.task_owner
assert CONFIG.task_source
queue_payload = {
"taskGroupId": CONFIG.decision_task_id,
"dependencies": [CONFIG.decision_task_id] + self.dependencies,
"schedulerId": self.scheduler_id,
"provisionerId": self.provisioner_id,
"workerType": self.worker_type,
"created": SHARED.from_now_json(""),
"deadline": SHARED.from_now_json(self.deadline_in),
"expires": SHARED.from_now_json(self.expires_in),
"metadata": {
"name": CONFIG.task_name_template % self.name,
"description": self.description,
"owner": CONFIG.task_owner,
"source": CONFIG.task_source,
},
"payload": worker_payload,
}
scopes = self.scopes + CONFIG.scopes_for_all_subtasks
routes = self.routes + CONFIG.routes_for_all_subtasks
if any(r.startswith("index.") for r in routes):
self.extra.setdefault("index", {})["expires"] = \
SHARED.from_now_json(self.index_and_artifacts_expire_in)
dict_update_if_truthy(
queue_payload,
scopes=scopes,
routes=routes,
extra=self.extra,
)
task_id = taskcluster.slugId().decode("utf8")
SHARED.schedule_task(task_id, queue_payload)
return task_id
def find_or_create(self, index_path=None):
"""
Try to find a task in the Index and return its ID.
The index path used is `{CONFIG.index_prefix}.{index_path}`.
`index_path` defaults to `by-task-definition.{sha256}`
with a hash of the worker payload and worker type.
If no task is found in the index,
it is created with a route to add it to the index at that same path if it succeeds.
<https://docs.taskcluster.net/docs/reference/core/taskcluster-index/references/api#findTask>
"""
if not index_path:
worker_type = self.worker_type
index_by = json.dumps([worker_type, self.build_worker_payload()]).encode("utf-8")
index_path = "by-task-definition." + hashlib.sha256(index_by).hexdigest()
index_path = "%s.%s" % (CONFIG.index_prefix, index_path)
task_id = SHARED.found_or_created_indexed_tasks.get(index_path)
if task_id is not None:
return task_id
try:
task_id = SHARED.index_service.findTask(index_path)["taskId"]
SHARED.all_tasks.append(task_id)
except taskcluster.TaskclusterRestFailure as e:
if e.status_code != 404: # pragma: no cover
raise
self.routes.append("index." + index_path)
task_id = self.create()
SHARED.found_or_created_indexed_tasks[index_path] = task_id
return task_id
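# For example, `DockerWorkerTask.with_dockerfile` (below) indexes Docker image builds at
# `{CONFIG.index_prefix}.appservices-docker-image.{sha256 of the expanded Dockerfile}`,
# so an unchanged Dockerfile re-uses the previously built image instead of rebuilding it.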
def reuse_or_create(self, cache_id=None):
"""
See if we can re-use a task with the same `cache_id`, or create a new one.
This is similar to `find_or_create`, except that the cache only lives for the
duration of this decision task run: nothing is persisted to the Taskcluster index.
"""
task_id = SHARED.tasks_cache.get(cache_id)
if task_id is not None:
return task_id
task_id = self.create()
SHARED.tasks_cache[cache_id] = task_id
return task_id
class BeetmoverTask(Task):
def __init__(self, name):
super().__init__(name)
self.provisioner_id = "scriptworker-prov-v1"
self.app_name = None
self.app_version = None
self.upstream_artifacts = []
self.artifact_map = []
with_app_name = chaining(setattr, "app_name")
with_app_version = chaining(setattr, "app_version")
with_upstream_artifact = chaining(append_to_attr, "upstream_artifacts")
with_artifact_map = chaining(setattr, "artifact_map")
def build_worker_payload(self):
payload = {
"maxRunTime": 10 * 60,
"releaseProperties": {
"appName": self.app_name,
},
"upstreamArtifacts": self.upstream_artifacts,
"artifactMap": self.artifact_map,
"version": self.app_version,
}
return payload
class SignTask(Task):
def __init__(self, name):
super().__init__(name)
self.provisioner_id = "scriptworker-prov-v1"
self.upstream_artifacts = []
with_upstream_artifact = chaining(append_to_attr, "upstream_artifacts")
def build_worker_payload(self):
payload = {
"maxRunTime": 10 * 60,
"upstreamArtifacts": self.upstream_artifacts,
}
return payload
class DockerWorkerArtifact:
def __init__(self, worker_fs_path, taskcluster_path):
self.worker_fs_path = worker_fs_path
self.taskcluster_path = taskcluster_path
class DockerWorkerTask(Task):
"""
Task definition for a worker type that runs the `docker-worker` implementation.
Scripts are interpreted with `bash`.
<https://github.com/taskcluster/docker-worker>
"""
artifacts: List[DockerWorkerArtifact]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We use this specific version because our decision task also runs on this one.
# We also use that same version in docker/build.dockerfile
self.docker_image = "ubuntu:bionic-20180821"
self.max_run_time_minutes = 30
self.scripts = []
self.env = {}
self.caches = {}
self.features = {}
self.artifacts = []
with_docker_image = chaining(setattr, "docker_image")
with_max_run_time_minutes = chaining(setattr, "max_run_time_minutes")
with_script = chaining(append_to_attr, "scripts")
with_early_script = chaining(prepend_to_attr, "scripts")
with_caches = chaining(update_attr, "caches")
with_env = chaining(update_attr, "env")
def with_artifacts(self, worker_fs_path, taskcluster_path=None):
"""Adds artifact to task definition
Args:
worker_fs_path: path to artifact on worker
taskcluster_path: as represented on taskcluster. Defaults to "public/{the url basename of worker_path}"
Returns:
"""
self.artifacts.append(DockerWorkerArtifact(
worker_fs_path,
taskcluster_path or "public/" + url_basename(worker_fs_path)
))
return self
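# For example, `.with_artifacts("/build/sccache.log")` (as done in the decision task's
# `linux_build_task`) exposes that file as the `public/sccache.log` artifact.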
def build_worker_payload(self):
"""
Return a `docker-worker` worker payload.
<https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/payload>
"""
worker_payload = {
"image": self.docker_image,
"maxRunTime": self.max_run_time_minutes * 60,
"command": [
"/bin/bash", "--login", "-x", "-e", "-c",
deindent("\n".join(self.scripts))
],
}
if self.features.get("chainOfTrust"):
if isinstance(self.docker_image, dict):
cot = self.extra.setdefault("chainOfTrust", {})
cot.setdefault('inputs', {})['docker-image'] = self.docker_image['taskId']
return dict_update_if_truthy(
worker_payload,
env=self.env,
cache=self.caches,
features=self.features,
artifacts={
artifact.taskcluster_path: {
"type": "file",
"path": artifact.worker_fs_path,
"expires": SHARED.from_now_json(self.index_and_artifacts_expire_in),
}
for artifact in self.artifacts
},
)
def with_features(self, *names):
"""
Enable the given `docker-worker` features.
<https://docs.taskcluster.net/docs/reference/workers/docker-worker/docs/features>
"""
self.features.update({name: True for name in names})
return self
def with_curl_script(self, url, file_path):
return self \
.with_script("""
mkdir -p $(dirname {file_path})
curl -sfSL --retry 5 --retry-delay 10 --connect-timeout 10 {url} -o {file_path}
""".format(url=url, file_path=file_path))
def with_curl_artifact_script(self, task_id, artifact_name, out_directory=""):
return self \
.with_dependencies(task_id) \
.with_curl_script(
"https://queue.taskcluster.net/v1/task/%s/artifacts/public/%s"
% (task_id, artifact_name),
os.path.join(out_directory, url_basename(artifact_name)),
)
def with_repo(self):
"""
Check out the git repository at the start of the task.
This uses `CONFIG.git_url`, `CONFIG.git_ref`, and `CONFIG.git_sha`,
and fetches into the `repo` directory that the Docker image pre-creates
with `git init repo` (i.e. `/build/repo` in the build image).
`git` and `ca-certificates` need to be installed in the Docker image.
"""
return self \
.with_env(**git_env()) \
.with_early_script("""
cd repo
git fetch --quiet --tags "$APPSERVICES_HEAD_REPOSITORY" "$APPSERVICES_HEAD_BRANCH"
git reset --hard "$APPSERVICES_HEAD_REV"
""")
def with_dockerfile(self, dockerfile, use_indexed_task=True):
"""
Build a Docker image based on the given `Dockerfile`, and use it for this task.
`dockerfile` is a path in the filesystem where this code is running.
Some non-standard syntax is supported, see `expand_dockerfile`.
The image is indexed based on a hash of the expanded `Dockerfile`,
and cached for `CONFIG.docker_images_expire_in`.
Images are built without any *context*.
<https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#understand-build-context>
"""
basename = os.path.basename(dockerfile)
suffix = ".dockerfile"
assert basename.endswith(suffix)
image_name = basename[:-len(suffix)]
dockerfile_contents = expand_dockerfile(dockerfile)
digest = hashlib.sha256(dockerfile_contents).hexdigest()
image_build_task = (
DockerWorkerTask("Docker image: " + image_name)
.with_worker_type(CONFIG.images_worker_type)
.with_max_run_time_minutes(30)
.with_index_and_artifacts_expire_in(CONFIG.docker_images_expire_in)
.with_features("dind")
.with_env(DOCKERFILE=dockerfile_contents)
.with_artifacts("/image.tar.lz4")
.with_script("""
echo "$DOCKERFILE" | docker build -t taskcluster-built -
docker save taskcluster-built | lz4 > /image.tar.lz4
""")
.with_docker_image(
# https://github.com/servo/taskcluster-bootstrap-docker-images#image-builder
"servobrowser/taskcluster-bootstrap:image-builder@sha256:" \
"0a7d012ce444d62ffb9e7f06f0c52fedc24b68c2060711b313263367f7272d9d"
)
)
if self.features.get("chainOfTrust"):
image_build_task.with_features("chainOfTrust")
task_index = "appservices-docker-image." + digest
if use_indexed_task:
image_build_task_id = image_build_task.find_or_create(task_index)
else:
image_build_task_id = image_build_task.reuse_or_create(task_index)
return self \
.with_dependencies(image_build_task_id) \
.with_docker_image({
"type": "task-image",
"path": "public/image.tar.lz4",
"taskId": image_build_task_id,
})
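# Illustrative usage, mirroring `linux_build_task` in the decision task:
#
#     task.with_dockerfile(dockerfile_path("build"), use_indexed_task)
#
# Pull-request and push builds pass use_indexed_task=True, so an unchanged Dockerfile
# re-uses the indexed image; release builds pass False and rebuild the image within the
# same task graph (see `reuse_or_create` above).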
def expand_dockerfile(dockerfile):
"""
Read the file at path `dockerfile`,
and transitively expand the non-standard `% include` header if it is present.
"""
with open(dockerfile, "rb") as f:
dockerfile_contents = f.read()
include_marker = b"% include"
if not dockerfile_contents.startswith(include_marker):
return dockerfile_contents
include_line, _, rest = dockerfile_contents.partition(b"\n")
included = include_line[len(include_marker):].strip().decode("utf8")
path = os.path.join(os.path.dirname(dockerfile), included)
return b"\n".join([expand_dockerfile(path), rest])
def git_env():
assert CONFIG.git_url
assert CONFIG.git_ref
assert CONFIG.git_sha
return {
"APPSERVICES_HEAD_REPOSITORY": CONFIG.git_url,
"APPSERVICES_HEAD_BRANCH": CONFIG.git_ref,
"APPSERVICES_HEAD_REV": CONFIG.git_sha,
}
def dict_update_if_truthy(d, **kwargs):
for key, value in kwargs.items():
if value:
d[key] = value
return d
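# `dict_update_if_truthy` is how `Task.create` and `DockerWorkerTask.build_worker_payload`
# omit empty values (e.g. no extra scopes, routes, caches or artifacts) from the payloads
# they send to the Queue, instead of including them as empty lists/dicts.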
def deindent(string):
return re.sub("\n +", "\n ", string).strip()
def url_basename(url):
return url.rpartition("/")[-1]
def populate_chain_of_trust_required_but_unused_files():
# These files are needed to keep chainOfTrust happy. However,
# they are not needed for a-s at the moment. For more details, see:
# https://github.com/mozilla-releng/scriptworker/pull/209/files#r184180585
for file_name in ('actions.json', 'parameters.yml'):
with open(file_name, 'w') as f:
json.dump({}, f)  # YAML is a superset of JSON.
def populate_chain_of_trust_task_graph(full_task_graph):
# taskgraph must follow the format:
# {
# task_id: full_task_definition
# }
with open('task-graph.json', 'w') as f:
json.dump(full_task_graph, f)


@@ -0,0 +1,152 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# We use this specific version because our decision task also runs on this one.
# We also use that same version in decisionlib.py
FROM ubuntu:bionic-20180821
MAINTAINER Edouard Oger "eoger@mozilla.com"
# Configuration
ENV ANDROID_BUILD_TOOLS "28.0.3"
ENV ANDROID_SDK_VERSION "3859397"
ENV ANDROID_PLATFORM_VERSION "28"
# Set up the language variables to avoid problems (we run locale-gen later).
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
# Do not use fancy output on taskcluster
ENV TERM dumb
ENV GRADLE_OPTS -Xmx4096m -Dorg.gradle.daemon=false
# Used to detect in scripts whether we are running on taskcluster
ENV CI_TASKCLUSTER true
ENV \
# Some APT packages like 'tzdata' wait for user input on install by default.
# https://stackoverflow.com/questions/44331836/apt-get-install-tzdata-noninteractive
DEBIAN_FRONTEND=noninteractive
# System.
RUN apt-get update -qq \
&& apt-get install -qy --no-install-recommends \
# To compile Android stuff.
openjdk-8-jdk \
git \
curl \
# Required by symbolstore.py.
file \
# Will set up the timezone to UTC (?).
tzdata \
# To install UTF-8 locales.
locales \
# For `cc` crates; see https://github.com/jwilm/alacritty/issues/1440.
# <TODO: Is this still true?>.
g++ \
# <TODO: Explain why we have this dependency>.
clang \
python3 \
python3-pip \
# taskcluster > mohawk > setuptools.
python3-setuptools \
# Required to extract the Android SDK/NDK.
unzip \
# Required by tooltool to extract tar.xz archives.
xz-utils \
# Required to build libs/.
make \
# Required to build sqlcipher.
tclsh \
# Required in libs/ by some scripts patching the source they download.
patch \
# For windows cross-compilation.
mingw-w64 \
## NSS build dependencies
gyp \
ninja-build \
zlib1g-dev \
# <TODO: Delete p7zip once NSS windows is actually compiled instead of downloaded>.
p7zip-full \
## End of NSS build dependencies
&& apt-get clean
RUN pip3 install --upgrade pip
RUN pip3 install \
'taskcluster>=4,<5' \
pyyaml
# Compile the UTF-8 english locale files (required by Python).
RUN locale-gen en_US.UTF-8
# Android SDK
RUN mkdir -p /build/android-sdk
WORKDIR /build
ENV ANDROID_HOME /build/android-sdk
ENV ANDROID_SDK_HOME /build/android-sdk
ENV PATH ${PATH}:${ANDROID_SDK_HOME}/tools:${ANDROID_SDK_HOME}/tools/bin:${ANDROID_SDK_HOME}/platform-tools:/opt/tools:${ANDROID_SDK_HOME}/build-tools/${ANDROID_BUILD_TOOLS}
RUN curl -sfSL --retry 5 --retry-delay 10 https://dl.google.com/android/repository/sdk-tools-linux-${ANDROID_SDK_VERSION}.zip > sdk.zip \
&& unzip -q sdk.zip -d ${ANDROID_SDK_HOME} \
&& rm sdk.zip \
&& mkdir -p /build/android-sdk/.android/ \
&& touch /build/android-sdk/.android/repositories.cfg \
&& yes | sdkmanager --licenses \
&& sdkmanager --verbose "platform-tools" \
"platforms;android-${ANDROID_PLATFORM_VERSION}" \
"build-tools;${ANDROID_BUILD_TOOLS}" \
"extras;android;m2repository" \
"extras;google;m2repository"
# Android NDK
# r15c agrees with mozilla-central and, critically, supports the --deprecated-headers flag needed to
# build OpenSSL
ENV ANDROID_NDK_VERSION "r15c"
# $ANDROID_NDK_ROOT is the preferred name, but the android gradle plugin uses $ANDROID_NDK_HOME.
ENV ANDROID_NDK_ROOT /build/android-ndk
ENV ANDROID_NDK_HOME /build/android-ndk
RUN curl -sfSL --retry 5 --retry-delay 10 https://dl.google.com/android/repository/android-ndk-${ANDROID_NDK_VERSION}-linux-x86_64.zip > ndk.zip \
&& unzip -q ndk.zip -d /build \
&& rm ndk.zip \
&& mv /build/android-ndk-${ANDROID_NDK_VERSION} ${ANDROID_NDK_ROOT}
ENV ANDROID_NDK_TOOLCHAIN_DIR /root/.android-ndk-r15c-toolchain
ENV ANDROID_NDK_API_VERSION 21
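# These two are consumed by the build scripts in the decision task, which generate
# standalone NDK toolchains under $ANDROID_NDK_TOOLCHAIN_DIR (cached between tasks via the
# application-services-android-ndk-toolchain docker-worker cache).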
# Rust
RUN set -eux; \
RUSTUP_PLATFORM='x86_64-unknown-linux-gnu'; \
RUSTUP_VERSION='1.18.3'; \
RUSTUP_SHA256='a46fe67199b7bcbbde2dcbc23ae08db6f29883e260e23899a88b9073effc9076'; \
curl -sfSL --retry 5 --retry-delay 10 -O "https://static.rust-lang.org/rustup/archive/${RUSTUP_VERSION}/${RUSTUP_PLATFORM}/rustup-init"; \
echo "${RUSTUP_SHA256} *rustup-init" | sha256sum -c -; \
chmod +x rustup-init; \
./rustup-init -y --no-modify-path --default-toolchain none; \
rm rustup-init
ENV PATH=/root/.cargo/bin:$PATH
# sccache
RUN \
curl -sfSL --retry 5 --retry-delay 10 \
https://github.com/mozilla/sccache/releases/download/0.2.11/sccache-0.2.11-x86_64-unknown-linux-musl.tar.gz \
| tar -xz --strip-components=1 -C /usr/local/bin/ \
sccache-0.2.11-x86_64-unknown-linux-musl/sccache
# tooltool
RUN \
curl -sfSL --retry 5 --retry-delay 10 \
-o /usr/local/bin/tooltool.py \
https://raw.githubusercontent.com/mozilla/build-tooltool/36511dae0ead6848017e2d569b1f6f1b36984d40/tooltool.py && \
chmod +x /usr/local/bin/tooltool.py
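# Pre-create an empty git repository; decisionlib's `with_repo()` fetches the revision
# under test into it at task run time.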
RUN git init repo

54
automation/taskcluster/mock.py Executable file

@@ -0,0 +1,54 @@
#!/bin/bash
# Copyright 2018 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
''''set -e
python3 -m coverage run $0
python3 -m coverage report -m --fail-under 100
exit
'''
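# The block above makes this file a bash/Python polyglot: executed as a shell script (per
# the #!/bin/bash shebang), bash runs the `set -e` and coverage commands and exits before
# reaching the Python code below; parsed as Python, the same block is a harmless string literal.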
"""
Run the decision task with fake Taskcluster APIs, to catch Python errors before pushing.
"""
import os
import sys
from unittest.mock import MagicMock
class TaskclusterRestFailure(Exception):
status_code = 404
class Index:
__init__ = insertTask = lambda *_, **__: None
def findTask(self, _):
raise TaskclusterRestFailure
stringDate = str
slugId = b"id".lower
Queue = fromNow = MagicMock()
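# Registering this module under the name "taskcluster" below means that decisionlib's
# `import taskcluster` picks up the stubs above instead of the real client library.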
sys.modules["taskcluster"] = sys.modules[__name__]
sys.dont_write_bytecode = True
os.environ.update(**{k: k for k in "TASK_ID TASK_OWNER TASK_SOURCE GIT_URL GIT_SHA".split()})
os.environ["APPSERVICES_HEAD_BRANCH"] = "refs/heads/auto"
import decision_task as decision_task
print("\n# Push:")
decision_task.main("github-pull-request", mock=True)
print("\n# Push with hot caches:")
decision_task.main("github-pull-request", mock=True)
# print("\n# Daily:")
# decision_task.main("daily", mock=True)


@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -euvx
if [[ "${#}" -ne 1 ]]
then
echo "Usage:"
echo "./automation/upload_android_symbols.sh <project path>"
exit 1
fi
if [[ ! -f "$PWD/libs/android_defaults.sh" ]]
then
echo "upload_android_symbols.sh must be executed from the root directory."
exit 1
fi
PROJECT_PATH=${1}
# shellcheck disable=SC1091
source "libs/android_defaults.sh"
OUTPUT_FOLDER="crashreporter-symbols"
DUMP_SYMS_DIR="automation/symbols-generation/bin"
if [[ ! -f "${DUMP_SYMS_DIR}/dump_syms" ]]; then
tooltool.py --manifest=automation/symbols-generation/dump_syms.manifest --url=http://taskcluster/tooltool.mozilla-releng.net/ fetch
chmod +x dump_syms
mkdir -p "${DUMP_SYMS_DIR}"
mv dump_syms "${DUMP_SYMS_DIR}"
fi
# Keep the 3 in sync.
TARGET_ARCHS=("x86_64" "x86" "arm64" "arm")
JNI_LIBS_TARGETS=("x86_64" "x86" "arm64-v8a" "armeabi-v7a")
OBJCOPY_BINS=("x86_64-linux-android-objcopy" "i686-linux-android-objcopy" "aarch64-linux-android-objcopy" "arm-linux-androideabi-objcopy")
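# e.g. index 3 pairs the "arm" symbols with the "armeabi-v7a" jniLibs directory and the
# arm-linux-androideabi-objcopy binary.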
rm -rf "${OUTPUT_FOLDER}"
mkdir -p "${OUTPUT_FOLDER}"
# 1. Generate the symbols.
for i in "${!TARGET_ARCHS[@]}"; do
export OBJCOPY="${ANDROID_NDK_TOOLCHAIN_DIR}/${TARGET_ARCHS[${i}]}-${ANDROID_NDK_API_VERSION}/bin/${OBJCOPY_BINS[${i}]}"
JNI_SO_PATH="${PROJECT_PATH}/build/rustJniLibs/android/${JNI_LIBS_TARGETS[${i}]}"
for sofile in "${JNI_SO_PATH}"/*.so; do
python3 automation/symbols-generation/symbolstore.py -c -s . --vcs-info "${DUMP_SYMS_DIR}"/dump_syms "${OUTPUT_FOLDER}" "${sofile}"
done
done
# 2. Upload them.
pip3 install -r automation/symbols-generation/requirements.txt
python3 automation/symbols-generation/upload_symbols.py "${OUTPUT_FOLDER}"