gecko-dev/build/moz.configure/toolchain.configure

# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
imply_option('--enable-release', mozilla_official)
imply_option('--enable-release', depends_if('MOZ_AUTOMATION')(lambda x: True))
js_option('--enable-release',
          default=milestone.is_release_or_beta,
          help='Build with more conservative, release engineering-oriented '
               'options. This may slow down builds.')

@depends('--enable-release')
def developer_options(value):
    if not value:
        return True

add_old_configure_assignment('DEVELOPER_OPTIONS', developer_options)
set_config('DEVELOPER_OPTIONS', developer_options)
# PGO
# ==============================================================
js_option(env='MOZ_PGO', help='Build with profile guided optimizations')
set_config('MOZ_PGO', depends('MOZ_PGO')(lambda x: bool(x)))
add_old_configure_assignment('MOZ_PGO', depends('MOZ_PGO')(lambda x: bool(x)))
# Code optimization
# ==============================================================
js_option('--enable-optimize',
          nargs='?',
          default=True,
          help='Enable optimizations via compiler flags')

@depends('--enable-optimize')
def moz_optimize(option):
    flags = None

    if len(option):
        val = '2'
        flags = option[0]
    elif option:
        val = '1'
    else:
        val = None

    return namespace(
        optimize=val,
        flags=flags,
    )
set_config('MOZ_OPTIMIZE', moz_optimize.optimize)
add_old_configure_assignment('MOZ_OPTIMIZE', moz_optimize.optimize)
add_old_configure_assignment('MOZ_CONFIGURE_OPTIMIZE_FLAGS', moz_optimize.flags)
# yasm detection
# ==============================================================
yasm = check_prog('YASM', ['yasm'], allow_missing=True)
@depends_if(yasm)
@checking('yasm version')
def yasm_version(yasm):
    version = check_cmd_output(
        yasm, '--version',
        onerror=lambda: die('Failed to get yasm version.')
    ).splitlines()[0].split()[1]
    return Version(version)

@depends_if(yasm_version)
def yasm_major_version(yasm_version):
    return str(yasm_version.major)

@depends_if(yasm_version)
def yasm_minor_version(yasm_version):
    return str(yasm_version.minor)

set_config('YASM_MAJOR_VERSION', yasm_major_version)
set_config('YASM_MINOR_VERSION', yasm_minor_version)

# Until we move all the yasm consumers out of old-configure.
# bug 1257904
add_old_configure_assignment('_YASM_MAJOR_VERSION',
                             yasm_version.major)
add_old_configure_assignment('_YASM_MINOR_VERSION',
                             yasm_version.minor)
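
# For example, a WINNT/x86_64 target with yasm available ends up with
# YASM_ASFLAGS = ['-f', 'x64', '-rnasm', '-pnasm'] from the function below.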
@depends(yasm, target)
def yasm_asflags(yasm, target):
    if yasm:
        asflags = {
            ('OSX', 'x86'): ['-f', 'macho32'],
            ('OSX', 'x86_64'): ['-f', 'macho64'],
            ('WINNT', 'x86'): ['-f', 'win32'],
            ('WINNT', 'x86_64'): ['-f', 'x64'],
        }.get((target.os, target.cpu), None)
        if asflags is None:
            # We're assuming every x86 platform we support that's
            # not Windows or Mac is ELF.
            if target.cpu == 'x86':
                asflags = ['-f', 'elf32']
            elif target.cpu == 'x86_64':
                asflags = ['-f', 'elf64']
        if asflags:
            asflags += ['-rnasm', '-pnasm']
        return asflags

set_config('YASM_ASFLAGS', yasm_asflags)

@depends(yasm_asflags)
def have_yasm(value):
    if value:
        return True
set_config('HAVE_YASM', have_yasm)
# Until the YASM variable is not necessary in old-configure.
add_old_configure_assignment('YASM', have_yasm)
# Android NDK
# ==============================================================
@depends('--disable-compile-environment', build_project, '--help')
def compiling_android(compile_env, build_project, _):
    return compile_env and build_project in ('mobile/android', 'js')
include('android-ndk.configure', when=compiling_android)
# MacOS deployment target version
# ==============================================================
# This needs to happen before any compilation test is done.
option('--enable-macos-target', env='MACOSX_DEPLOYMENT_TARGET', nargs=1,
       default='10.9', help='Set the minimum MacOS version needed at runtime')

@depends('--enable-macos-target', target)
@imports(_from='os', _import='environ')
def macos_target(value, target):
    if value and target.os == 'OSX':
        # Ensure every compiler process we spawn uses this value.
        environ['MACOSX_DEPLOYMENT_TARGET'] = value[0]
        return value[0]
    if value and value.origin != 'default':
        die('--enable-macos-target cannot be used when targeting %s',
            target.os)
set_config('MACOSX_DEPLOYMENT_TARGET', macos_target)
add_old_configure_assignment('MACOSX_DEPLOYMENT_TARGET', macos_target)
# Xcode state
# ===========
js_option('--disable-xcode-checks',
          help='Do not check that Xcode is installed and properly configured')

@depends(host, '--disable-xcode-checks')
def xcode_path(host, xcode_checks):
    if host.kernel != 'Darwin' or not xcode_checks:
        return

    # xcode-select -p prints the path to the installed Xcode. It
    # should exit 0 and return non-empty result if Xcode is installed.
    def bad_xcode_select():
        die('Could not find installed Xcode; install Xcode from the App '
            'Store, run it once to perform initial configuration, and then '
            'try again; in the rare case you wish to build without Xcode '
            'installed, add the --disable-xcode-checks configure flag')

    xcode_path = check_cmd_output('xcode-select', '--print-path',
                                  onerror=bad_xcode_select).strip()
    if not xcode_path:
        bad_xcode_select()

    # Now look for the Command Line Tools.
    def no_cltools():
        die('Could not find installed Xcode Command Line Tools; '
            'run `xcode-select --install` and follow the instructions '
            'to install them then try again; if you wish to build without '
            'Xcode Command Line Tools installed, '
            'add the --disable-xcode-checks configure flag')

    check_cmd_output('pkgutil', '--pkg-info',
                     'com.apple.pkg.CLTools_Executables',
                     onerror=no_cltools)

    return xcode_path
set_config('XCODE_PATH', xcode_path)
# Compiler wrappers
# ==============================================================
# Normally, we'd use js_option and automatically have those variables
# propagated to js/src, but things are complicated by possible additional
# wrappers in CC/CXX, and by other subconfigures that do not handle those
# options and do need CC/CXX altered.
option('--with-compiler-wrapper', env='COMPILER_WRAPPER', nargs=1,
       help='Enable compiling with wrappers such as distcc and ccache')

option('--with-ccache', env='CCACHE', nargs='?',
       help='Enable compiling with ccache')

@depends_if('--with-ccache')
def ccache(value):
    if len(value):
        return value
    # If --with-ccache was given without an explicit value, we default to
    # 'ccache'.
    return 'ccache'
ccache = check_prog('CCACHE', progs=(), input=ccache)
# Distinguish ccache from sccache.
@depends_if(ccache)
def ccache_is_sccache(ccache):
    return check_cmd_output(ccache, '--version').startswith('sccache')

@depends(ccache, ccache_is_sccache)
def using_ccache(ccache, ccache_is_sccache):
    return ccache and not ccache_is_sccache

@depends_if(ccache, ccache_is_sccache)
def using_sccache(ccache, ccache_is_sccache):
    return ccache and ccache_is_sccache

set_config('MOZ_USING_CCACHE', using_ccache)
set_config('MOZ_USING_SCCACHE', using_sccache)

option(env='SCCACHE_VERBOSE_STATS',
       help='Print verbose sccache stats after build')

@depends(using_sccache, 'SCCACHE_VERBOSE_STATS')
def sccache_verbose_stats(using_sccache, verbose_stats):
    return using_sccache and bool(verbose_stats)
set_config('SCCACHE_VERBOSE_STATS', sccache_verbose_stats)
@depends('--with-compiler-wrapper', ccache)
@imports(_from='mozbuild.shellutil', _import='split', _as='shell_split')
def compiler_wrapper(wrapper, ccache):
    if wrapper:
        raw_wrapper = wrapper[0]
        wrapper = shell_split(raw_wrapper)
        wrapper_program = find_program(wrapper[0])
        if not wrapper_program:
            die('Cannot find `%s` from the given compiler wrapper `%s`',
                wrapper[0], raw_wrapper)
        wrapper[0] = wrapper_program

    if ccache:
        if wrapper:
            return tuple([ccache] + wrapper)
        else:
            return (ccache,)
    elif wrapper:
        return tuple(wrapper)

add_old_configure_assignment('COMPILER_WRAPPER', compiler_wrapper)

@depends_if(compiler_wrapper)
def using_compiler_wrapper(compiler_wrapper):
    return True
set_config('MOZ_USING_COMPILER_WRAPPER', using_compiler_wrapper)
# GC rooting and hazard analysis.
# ==============================================================
option(env='MOZ_HAZARD', help='Build for the GC rooting hazard analysis')
@depends('MOZ_HAZARD')
def hazard_analysis(value):
    if value:
        return True
set_config('MOZ_HAZARD', hazard_analysis)
# Cross-compilation related things.
# ==============================================================
js_option('--with-toolchain-prefix', env='TOOLCHAIN_PREFIX', nargs=1,
          help='Prefix for the target toolchain')

@depends('--with-toolchain-prefix', target, cross_compiling)
def toolchain_prefix(value, target, cross_compiling):
    if value:
        return tuple(value)
    if cross_compiling:
        return ('%s-' % target.toolchain, '%s-' % target.alias)

@depends(toolchain_prefix, target)
def first_toolchain_prefix(toolchain_prefix, target):
    # Pass TOOLCHAIN_PREFIX down to the build system if it was given from the
    # command line/environment (in which case there's only one value in the tuple),
    # or when cross-compiling for Android.
    if toolchain_prefix and (target.os == 'Android' or len(toolchain_prefix) == 1):
        return toolchain_prefix[0]
set_config('TOOLCHAIN_PREFIX', first_toolchain_prefix)
add_old_configure_assignment('TOOLCHAIN_PREFIX', first_toolchain_prefix)
# Compilers
# ==============================================================
include('compilers-util.configure')
def try_preprocess(compiler, language, source):
    return try_invoke_compiler(compiler, language, source, ['-E'])

@imports(_from='mozbuild.configure.constants', _import='CompilerType')
@imports(_from='mozbuild.configure.constants',
         _import='CPU_preprocessor_checks')
@imports(_from='mozbuild.configure.constants',
         _import='kernel_preprocessor_checks')
@imports(_from='textwrap', _import='dedent')
def get_compiler_info(compiler, language):
    '''Returns information about the given `compiler` (command line in the
    form of a list or tuple), in the given `language`.

    The returned information includes:
    - the compiler type (msvc, clang-cl, clang or gcc)
    - the compiler version
    - the compiler supported language
    - the compiler supported language version
    '''
    # Note: MSVC doesn't expose __STDC_VERSION__. It does expose __STDC__,
    # but only when given the -Za option, which disables compiler
    # extensions.
    # Note: We'd normally do a version check for clang, but versions of clang
    # in Xcode have a completely different versioning scheme despite exposing
    # the version with the same defines.
    # So instead, we make things such that the version is missing when the
    # clang used is below the minimum supported version (currently clang 3.6).
    # We then only include the version information when the C++ compiler
    # matches the feature check, so that an unsupported version of clang would
    # have no version number.
    check = dedent('''\
        #if defined(_MSC_VER)
        #if defined(__clang__)
        %COMPILER "clang-cl"
        %VERSION _MSC_FULL_VER
        #else
        %COMPILER "msvc"
        %VERSION _MSC_FULL_VER
        #endif
        #elif defined(__clang__)
        %COMPILER "clang"
        # if !__cplusplus || __has_feature(cxx_alignof)
        %VERSION __clang_major__.__clang_minor__.__clang_patchlevel__
        # endif
        #elif defined(__GNUC__)
        %COMPILER "gcc"
        %VERSION __GNUC__.__GNUC_MINOR__.__GNUC_PATCHLEVEL__
        #endif
        #if __cplusplus
        %cplusplus __cplusplus
        #elif __STDC_VERSION__
        %STDC_VERSION __STDC_VERSION__
        #elif __STDC__
        %STDC_VERSION 198900L
        #endif
    ''')

    # While we're doing some preprocessing, we might as well do some more
    # preprocessor-based tests at the same time, to check the toolchain
    # matches what we want.
    for name, preprocessor_checks in (
        ('CPU', CPU_preprocessor_checks),
        ('KERNEL', kernel_preprocessor_checks),
    ):
        for n, (value, condition) in enumerate(preprocessor_checks.iteritems()):
            check += dedent('''\
                #%(if)s %(condition)s
                %%%(name)s "%(value)s"
            ''' % {
                'if': 'elif' if n else 'if',
                'condition': condition,
                'name': name,
                'value': value,
            })
        check += '#endif\n'

    # Also check for endianness. The advantage of living in modern times is
    # that all the modern compilers we support now have __BYTE_ORDER__ defined
    # by the preprocessor, except MSVC, which only supports little endian.
    check += dedent('''\
        #if _MSC_VER || __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        %ENDIANNESS "little"
        #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        %ENDIANNESS "big"
        #endif
    ''')

    result = try_preprocess(compiler, language, check)

    if not result:
        raise FatalCheckError(
            'Unknown compiler or compiler not supported.')

    # Metadata emitted by preprocessors such as GCC with LANG=ja_JP.utf-8 may
    # have non-ASCII characters. Treat the output as bytearray.
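    # Illustration: interesting preprocessed lines look like `%COMPILER "gcc"`
    # or `%VERSION 7 . 3 . 0`; the loop below strips the leading '%', the
    # spaces and the quotes, giving e.g. data['COMPILER'] = 'gcc' and
    # data['VERSION'] = '7.3.0'.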
    data = {}
    for line in result.splitlines():
        if line.startswith(b'%'):
            k, _, v = line.partition(' ')
            k = k.lstrip('%')
            data[k] = v.replace(' ', '').lstrip('"').rstrip('"')
            log.debug('%s = %s', k, data[k])

    try:
        type = CompilerType(data['COMPILER'])
    except Exception:
        raise FatalCheckError(
            'Unknown compiler or compiler not supported.')

    cplusplus = int(data.get('cplusplus', '0L').rstrip('L'))
    stdc_version = int(data.get('STDC_VERSION', '0L').rstrip('L'))

    version = data.get('VERSION')
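    # For MSVC and clang-cl the VERSION macro is _MSC_FULL_VER, a packed
    # number such as 191326128, which the code below rewrites into the
    # dotted form 19.13.26128.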
    if version and type in ('msvc', 'clang-cl'):
        msc_ver = version
        version = msc_ver[0:2]
        if len(msc_ver) > 2:
            version += '.' + msc_ver[2:4]
        if len(msc_ver) > 4:
            version += '.' + msc_ver[4:]

    if version:
        version = Version(version)

    return namespace(
        type=type,
        version=version,
        cpu=data.get('CPU'),
        kernel=data.get('KERNEL'),
        endianness=data.get('ENDIANNESS'),
        language='C++' if cplusplus else 'C',
        language_version=cplusplus if cplusplus else stdc_version,
    )
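
# check_compiler() returns, in `flags`, whatever extra flags (for example
# '-std=gnu99', '-std=gnu++14' or '--target=%s' % target.toolchain) are needed
# to make the detected compiler match the requested language version and
# target; valid_compiler() further below re-runs the check with those flags
# added.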
@imports(_from='mozbuild.shellutil', _import='quote')
def check_compiler(compiler, language, target):
    info = get_compiler_info(compiler, language)

    flags = []

    def append_flag(flag):
        if flag not in flags:
            if info.type == 'clang-cl':
                flags.append('-Xclang')
            flags.append(flag)

    # Check language standards
    # --------------------------------------------------------------------
    if language != info.language:
        raise FatalCheckError(
            '`%s` is not a %s compiler.' % (quote(*compiler), language))

    # Note: We do a strict version check because there sometimes are backwards
    # incompatible changes in the standard, and not all code that compiles as
    # C99 compiles as e.g. C11 (as of writing, this is true of libnestegg, for
    # example)
    if info.language == 'C' and info.language_version != 199901:
        if info.type in ('clang-cl', 'clang', 'gcc'):
            append_flag('-std=gnu99')

    # Note: MSVC, while supporting C++14, still reports 199711L for __cplusplus.
    # Note: this is a strict version check because we used to always add
    # -std=gnu++14.
    cxx14_version = 201402
    if info.language == 'C++':
        if info.type == 'clang' and info.language_version != cxx14_version:
            append_flag('-std=gnu++14')
        # MSVC headers include C++14 features, but don't guard them
        # with appropriate checks.
        elif info.type == 'clang-cl' and info.language_version != cxx14_version:
            append_flag('-std=c++14')

    # We force clang-cl to emulate Visual C++ 2017 version 15.6.0
    msvc_version = '19.13.26128'
    if info.type == 'clang-cl' and info.version != msvc_version:
        # This flag is a direct clang-cl flag that doesn't need -Xclang,
        # add it directly.
        flags.append('-fms-compatibility-version=%s' % msvc_version)

    # Check compiler target
    # --------------------------------------------------------------------
    if not info.cpu or info.cpu != target.cpu:
        if info.type == 'clang':
            append_flag('--target=%s' % target.toolchain)
        elif info.type == 'clang-cl':
            # Ideally this would share the 'clang' branch above, but on Windows
            # the --target needs additional data like ms-compatibility-version.
            if (info.cpu, target.cpu) == ('x86_64', 'x86'):
                # -m32 does not use -Xclang, so add it directly.
                flags.append('-m32')
        elif info.type == 'gcc':
            same_arch_different_bits = (
                ('x86', 'x86_64'),
                ('ppc', 'ppc64'),
                ('sparc', 'sparc64'),
            )
            if (target.cpu, info.cpu) in same_arch_different_bits:
                append_flag('-m32')
            elif (info.cpu, target.cpu) in same_arch_different_bits:
                append_flag('-m64')

    if not info.kernel or info.kernel != target.kernel:
        if info.type == 'clang':
            append_flag('--target=%s' % target.toolchain)

    if not info.endianness or info.endianness != target.endianness:
        if info.type == 'clang':
            append_flag('--target=%s' % target.toolchain)

    return namespace(
        type=info.type,
        version=info.version,
        target_cpu=info.cpu,
        target_kernel=info.kernel,
        target_endianness=info.endianness,
        flags=flags,
    )
@imports(_from='__builtin__', _import='open')
@imports('json')
@imports('subprocess')
@imports('sys')
def get_vc_paths(topsrcdir):
    def vswhere(args):
        encoding = 'mbcs' if sys.platform == 'win32' else 'utf-8'
        return json.loads(
            subprocess.check_output([
                os.path.join(topsrcdir, 'build/win32/vswhere.exe'),
                '-format',
                'json'
            ] + args).decode(encoding, 'replace'))

    for install in vswhere(['-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64']):
        path = install['installationPath']
        tools_version = open(os.path.join(
            path, r'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt'), 'rb').read().strip()
        tools_path = os.path.join(
            path, r'VC\Tools\MSVC', tools_version, r'bin\HostX64')
        yield (Version(install['installationVersion']), {
            'x64': [os.path.join(tools_path, 'x64')],
            # The x64->x86 cross toolchain requires DLLs from the native x64 toolchain.
            'x86': [os.path.join(tools_path, 'x86'), os.path.join(tools_path, 'x64')],
            'arm64': [os.path.join(tools_path, 'x64')],
        })
js_option('--with-visual-studio-version', nargs=1,
          choices=('2017',),
          help='Select a specific Visual Studio version to use')

@depends('--with-visual-studio-version')
def vs_major_version(value):
    if value:
        return {'2017': 15}[value[0]]

@depends(host, target, vs_major_version, check_build_environment, '--with-visual-studio-version')
@imports(_from='__builtin__', _import='sorted')
@imports(_from='operator', _import='itemgetter')
@imports('platform')
def vc_compiler_path(host, target, vs_major_version, env, vs_release_name):
    if host.kernel != 'WINNT':
        return
    vc_target = {
        'x86': 'x86',
        'x86_64': 'x64',
        'arm': 'arm',
        'aarch64': 'arm64'
    }.get(target.cpu)
    if vc_target is None:
        return

    all_versions = sorted(get_vc_paths(env.topsrcdir), key=itemgetter(0))
    if not all_versions:
        return
    if vs_major_version:
        versions = [d for (v, d) in all_versions if v.major ==
                    vs_major_version]
        if not versions:
            die('Visual Studio %s could not be found!' % vs_release_name)
        data = versions[0]
    else:
        # Choose the newest version.
        data = all_versions[-1][1]
    paths = data.get(vc_target)
    if not paths:
        return
    return paths
@depends(vc_compiler_path)
@imports('os')
@imports(_from='os', _import='environ')
def toolchain_search_path(vc_compiler_path):
    result = [environ.get('PATH')]

    if vc_compiler_path:
        result.extend(vc_compiler_path)

    # Also add in the location to which `mach bootstrap` or
    # `mach artifact toolchain` installs clang.
    mozbuild_state_dir = environ.get('MOZBUILD_STATE_PATH',
                                     os.path.expanduser(os.path.join('~', '.mozbuild')))
    bootstrap_clang_path = os.path.join(mozbuild_state_dir, 'clang', 'bin')
    result.append(bootstrap_clang_path)

    bootstrap_cbindgen_path = os.path.join(mozbuild_state_dir, 'cbindgen')
    result.append(bootstrap_cbindgen_path)

    if vc_compiler_path:
        # We're going to alter PATH for good in windows.configure, but we also
        # need to do it for the valid_compiler() check below. This is only needed
        # on Windows, where MSVC needs PATH set to find dlls.
        environ['PATH'] = os.pathsep.join(result)
    return result
@template
def default_c_compilers(host_or_target):
    '''Template defining the set of default C compilers for the host and
    target platforms.

    `host_or_target` is either `host` or `target` (the @depends functions
    from init.configure).
    '''
    assert host_or_target in (host, target)

    @depends(host_or_target, target, toolchain_prefix, android_clang_compiler,
             developer_options)
    def default_c_compilers(host_or_target, target, toolchain_prefix,
                            android_clang_compiler, developer_options):
        gcc = ('gcc',)
        if toolchain_prefix and host_or_target is target:
            gcc = tuple('%sgcc' % p for p in toolchain_prefix) + gcc

        # Android sets toolchain_prefix and android_clang_compiler, but
        # we want the latter to take precedence, because the latter can
        # point at clang, which is what we want to use.
        if android_clang_compiler and host_or_target is target:
            return (android_clang_compiler,) + gcc

        if host_or_target.kernel == 'WINNT':
            return ('clang-cl', 'cl') + gcc + ('clang',)
        if host_or_target.kernel == 'Darwin':
            return ('clang',)
        if developer_options:
            return ('clang',) + gcc
        return gcc + ('clang',)

    return default_c_compilers
@template
def default_cxx_compilers(c_compiler):
    '''Template defining the set of default C++ compilers for the host and
    target platforms.

    `c_compiler` is the @depends function returning a Compiler instance for
    the desired platform.

    Because the build system expects the C and C++ compilers to be from the
    same compiler suite, we derive the default C++ compilers from the C
    compiler that was found if none was provided.
    '''
    @depends(c_compiler)
    def default_cxx_compilers(c_compiler):
        dir = os.path.dirname(c_compiler.compiler)
        file = os.path.basename(c_compiler.compiler)

        if c_compiler.type == 'gcc':
            return (os.path.join(dir, file.replace('gcc', 'g++')),)

        if c_compiler.type == 'clang':
            return (os.path.join(dir, file.replace('clang', 'clang++')),)

        return (c_compiler.compiler,)

    return default_cxx_compilers
@template
def compiler(language, host_or_target, c_compiler=None, other_compiler=None,
             other_c_compiler=None):
    '''Template handling the generic base checks for the compiler for the
    given `language` on the given platform (`host_or_target`).

    `host_or_target` is either `host` or `target` (the @depends functions
    from init.configure).

    When the language is 'C++', `c_compiler` is the result of the `compiler`
    template for the language 'C' for the same `host_or_target`.

    When `host_or_target` is `host`, `other_compiler` is the result of the
    `compiler` template for the same `language` for `target`.

    When `host_or_target` is `host` and the language is 'C++',
    `other_c_compiler` is the result of the `compiler` template for the
    language 'C' for `target`.
    '''
    assert host_or_target in (host, target)
    assert language in ('C', 'C++')
    assert language == 'C' or c_compiler is not None
    assert host_or_target == target or other_compiler is not None
    assert language == 'C' or host_or_target == target or \
        other_c_compiler is not None

    host_or_target_str = {
        host: 'host',
        target: 'target',
    }[host_or_target]

    var = {
        ('C', target): 'CC',
        ('C++', target): 'CXX',
        ('C', host): 'HOST_CC',
        ('C++', host): 'HOST_CXX',
    }[language, host_or_target]

    default_compilers = {
        'C': lambda: default_c_compilers(host_or_target),
        'C++': lambda: default_cxx_compilers(c_compiler),
    }[language]()

    what = 'the %s %s compiler' % (host_or_target_str, language)

    option(env=var, nargs=1, help='Path to %s' % what)

    # Handle the compiler given by the user through one of the CC/CXX/HOST_CC/
    # HOST_CXX variables.
    @depends_if(var)
    @imports(_from='itertools', _import='takewhile')
    @imports(_from='mozbuild.shellutil', _import='split', _as='shell_split')
    def provided_compiler(cmd):
        # Historically, the compiler variables have contained more than the
        # path to the compiler itself. So for backwards compatibility, try to
        # find what is what in there, assuming the first dash-prefixed item is
        # a compiler option, the item before that is the compiler, and anything
        # before that is a compiler wrapper.
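        # For example, CC="ccache gcc -std=gnu99" is split into
        # wrapper=['ccache'], compiler='gcc', flags=['-std=gnu99'] below.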
        cmd = shell_split(cmd[0])
        without_flags = list(takewhile(lambda x: not x.startswith('-'), cmd))
        return namespace(
            wrapper=without_flags[:-1],
            compiler=without_flags[-1],
            flags=cmd[len(without_flags):],
        )
    # Derive the host compiler from the corresponding target compiler when no
    # explicit compiler was given and we're not cross compiling. For the C++
    # compiler, though, prefer to derive from the host C compiler when it
    # doesn't match the target C compiler.
    # As a special case, since clang supports all kinds of targets in the same
    # executable, when cross compiling with clang, default to the same compiler
    # as the target compiler, resetting flags. However, Android NDK clangs do
    # not function as host compilers -- they're target compilers only -- so
    # don't use clang target as host if the target OS is Android.
    if host_or_target == host:
        if other_c_compiler is not None:
            args = (c_compiler, other_c_compiler)
        else:
            args = ()

        @depends(provided_compiler, other_compiler, cross_compiling,
                 target, *args)
        def provided_compiler(value, other_compiler, cross_compiling,
                              target, *args):
            if value:
                return value
            c_compiler, other_c_compiler = args if args else (None, None)
            if not cross_compiling and c_compiler == other_c_compiler:
                return other_compiler
            if cross_compiling and other_compiler.type == 'clang' and \
                    target.os != 'Android':
                return namespace(**{
                    k: [] if k == 'flags' else v
                    for k, v in other_compiler.__dict__.iteritems()
                })

    # Normally, we'd use `var` instead of `_var`, but the interaction with
    # old-configure complicates things, and for now, we a) can't take the plain
    # result from check_prog as CC/CXX/HOST_CC/HOST_CXX and b) have to let
    # old-configure AC_SUBST it (because it's autoconf doing it, not us)
    compiler = check_prog('_%s' % var, what=what, progs=default_compilers,
                          input=provided_compiler.compiler,
                          paths=toolchain_search_path)
    @depends(compiler, provided_compiler, compiler_wrapper, host_or_target)
    @checking('whether %s can be used' % what, lambda x: bool(x))
    @imports(_from='mozbuild.shellutil', _import='quote')
    def valid_compiler(compiler, provided_compiler, compiler_wrapper,
                       host_or_target):
        wrapper = list(compiler_wrapper or ())
        if provided_compiler:
            provided_wrapper = list(provided_compiler.wrapper)
            # When doing a subconfigure, the compiler is set by old-configure
            # and it contains the wrappers from --with-compiler-wrapper and
            # --with-ccache.
            if provided_wrapper[:len(wrapper)] == wrapper:
                provided_wrapper = provided_wrapper[len(wrapper):]
            wrapper.extend(provided_wrapper)
            flags = provided_compiler.flags
        else:
            flags = []

        # Ideally, we'd always use the absolute path, but unfortunately, on
        # Windows, the compiler is very often in a directory containing spaces.
        # Unfortunately, due to the way autoconf does its compiler tests with
        # eval, that doesn't work out. So in that case, check that the
        # compiler can still be found in $PATH, and use the file name instead
        # of the full path.
        if quote(compiler) != compiler:
            full_path = os.path.abspath(compiler)
            compiler = os.path.basename(compiler)
            found_compiler = find_program(compiler)
            if not found_compiler:
                die('%s is not in your $PATH'
                    % quote(os.path.dirname(full_path)))
            if os.path.normcase(find_program(compiler)) != os.path.normcase(
                    full_path):
                die('Found `%s` before `%s` in your $PATH. '
                    'Please reorder your $PATH.',
                    quote(os.path.dirname(found_compiler)),
                    quote(os.path.dirname(full_path)))

        info = check_compiler(wrapper + [compiler] + flags, language,
                              host_or_target)

        # Check that the additional flags we got are enough to not require any
        # more flags. If we get an exception, just ignore it; it's liable to be
        # invalid command-line flags, which means the compiler we're checking
        # doesn't support those command-line flags and will fail one or more of
        # the checks below.
        try:
            if info.flags:
                flags += info.flags
                info = check_compiler(wrapper + [compiler] + flags, language,
                                      host_or_target)
        except FatalCheckError:
            pass

        if not info.target_cpu or info.target_cpu != host_or_target.cpu:
            raise FatalCheckError(
                '%s %s compiler target CPU (%s) does not match --%s CPU (%s)'
                % (host_or_target_str.capitalize(), language,
                   info.target_cpu or 'unknown', host_or_target_str,
                   host_or_target.raw_cpu))

        if not info.target_kernel or (info.target_kernel !=
                                      host_or_target.kernel):
            raise FatalCheckError(
                '%s %s compiler target kernel (%s) does not match --%s kernel (%s)'
                % (host_or_target_str.capitalize(), language,
                   info.target_kernel or 'unknown', host_or_target_str,
                   host_or_target.kernel))

        if not info.target_endianness or (info.target_endianness !=
                                          host_or_target.endianness):
            raise FatalCheckError(
                '%s %s compiler target endianness (%s) does not match --%s '
                'endianness (%s)'
                % (host_or_target_str.capitalize(), language,
                   info.target_endianness or 'unknown', host_or_target_str,
                   host_or_target.endianness))

        # Compiler version checks
        # ===================================================
        # Check the compiler version here instead of in `compiler_version` so
        # that the `checking` message doesn't pretend the compiler can be used
        # to then bail out one line later.
        if info.type == 'gcc':
            if host_or_target.os == 'Android':
                raise FatalCheckError('GCC is not supported on Android.\n'
                                      'Please use clang from the Android NDK instead.')
            if info.version < '6.1.0':
                raise FatalCheckError(
                    'Only GCC 6.1 or newer is supported (found version %s).'
                    % info.version)

        # If you want to bump the version check here search for
        # cxx_alignof above, and see the associated comment.
        if info.type == 'clang' and not info.version:
            raise FatalCheckError(
                'Only clang/llvm 3.6 or newer is supported.')

        if info.type == 'msvc':
            if info.version < '19.13.26128':
                raise FatalCheckError(
                    'This version (%s) of the MSVC compiler is not '
                    'supported.\n'
                    'You must install Visual C++ 2017 Update 6 or '
                    'Update 8 or later in order to build.\n'
                    'See https://developer.mozilla.org/en/'
                    'Windows_Build_Prerequisites' % info.version)

            # MSVC version 15.7 and the previews for 15.8, at least,
            # can't build Firefox.
            if info.version >= '19.14.0' and info.version < '19.15.0':
                raise FatalCheckError(
                    'This version (%s) of the MSVC compiler is not '
                    'supported due to compiler bugs.\n'
                    'You must install Visual C++ 2017 Update 6 or '
                    'Update 8 or later in order to build.\n'
                    'See https://developer.mozilla.org/en/'
                    'Windows_Build_Prerequisites' % info.version)

        if info.flags:
            raise FatalCheckError(
                'Unknown compiler or compiler not supported.')

        return namespace(
            wrapper=wrapper,
            compiler=compiler,
            flags=flags,
            type=info.type,
            version=info.version,
            language=language,
        )
    @depends(valid_compiler)
    @checking('%s version' % what)
    def compiler_version(compiler):
        return compiler.version

    if language == 'C++':
        @depends(valid_compiler, c_compiler)
        def valid_compiler(compiler, c_compiler):
            if compiler.type != c_compiler.type:
                die('The %s C compiler is %s, while the %s C++ compiler is '
                    '%s. Need to use the same compiler suite.',
                    host_or_target_str, c_compiler.type,
                    host_or_target_str, compiler.type)
            if compiler.version != c_compiler.version:
                die('The %s C compiler is version %s, while the %s C++ '
                    'compiler is version %s. Need to use the same compiler '
                    'version.',
                    host_or_target_str, c_compiler.version,
                    host_or_target_str, compiler.version)
            return compiler

    # Set CC/CXX/HOST_CC/HOST_CXX for old-configure, which needs the wrapper
    # and the flags that were part of the user input for those variables to
    # be provided.
    add_old_configure_assignment(var, depends_if(valid_compiler)(
        lambda x: list(x.wrapper) + [x.compiler] + list(x.flags)))

    # Set CC_TYPE/CC_VERSION/HOST_CC_TYPE/HOST_CC_VERSION to allow
    # old-configure to do some of its still existing checks.
    if language == 'C':
        set_config(
            '%s_TYPE' % var, valid_compiler.type)
        add_old_configure_assignment(
            '%s_TYPE' % var, valid_compiler.type)
        add_old_configure_assignment(
            '%s_VERSION' % var, valid_compiler.version)

    valid_compiler = compiler_class(valid_compiler, host_or_target)

    def compiler_error():
        raise FatalCheckError('Failed compiling a simple %s source with %s'
                              % (language, what))

    valid_compiler.try_compile(check_msg='%s works' % what,
                               onerror=compiler_error)

    # Set CPP/CXXCPP for both the build system and old-configure. We don't
    # need to check this works for preprocessing, because we already relied
    # on $CC -E/$CXX -E doing preprocessing work to validate the compiler
    # in the first place.
    if host_or_target == target:
        pp_var = {
            'C': 'CPP',
            'C++': 'CXXCPP',
        }[language]

        preprocessor = depends_if(valid_compiler)(
            lambda x: list(x.wrapper) + [x.compiler, '-E'] + list(x.flags))

        set_config(pp_var, preprocessor)
        add_old_configure_assignment(pp_var, preprocessor)

    if language == 'C':
        linker_var = {
            target: 'LD',
            host: 'HOST_LD',
        }[host_or_target]

        @deprecated_option(env=linker_var, nargs=1)
        def linker(value):
            if value:
                return value[0]

        @depends(valid_compiler, linker)
        def unused_linker(compiler, linker):
            if linker and compiler.type != 'msvc':
                log.warning('The value of %s is not used by this build system.'
                            % linker_var)

        if host_or_target == target:
            @depends(valid_compiler)
            def is_msvc(compiler):
                return compiler.type == 'msvc'

            imply_option('LINKER', linker, reason='LD', when=is_msvc)

    return valid_compiler
c_compiler = compiler('C', target)
cxx_compiler = compiler('C++', target, c_compiler=c_compiler)
host_c_compiler = compiler('C', host, other_compiler=c_compiler)
host_cxx_compiler = compiler('C++', host, c_compiler=host_c_compiler,
                             other_compiler=cxx_compiler,
                             other_c_compiler=c_compiler)
# Generic compiler-based conditions.
non_msvc_compiler = depends(c_compiler)(lambda info: info.type != 'msvc')
building_with_gcc = depends(c_compiler)(lambda info: info.type == 'gcc')
@depends(c_compiler)
def msvs_version(info):
    # clang-cl emulates the same version scheme as cl. And MSVS_VERSION needs to
    # be set for GYP on Windows.
    if info.type in ('clang-cl', 'msvc'):
        if info.version >= '19.10':
            return '2017'

    return ''
set_config('MSVS_VERSION', msvs_version)
include('compile-checks.configure')
@depends(have_64_bit,
         try_compile(body='static_assert(sizeof(void *) == 8, "")',
                     check_msg='for 64-bit OS'))
def check_have_64_bit(have_64_bit, compiler_have_64_bit):
    if have_64_bit != compiler_have_64_bit:
        configure_error('The target compiler does not agree with configure '
                        'about the target bitness.')

option(env='BINDGEN_CFLAGS',
       nargs=1,
       default=bindgen_cflags_defaults,
       help='Options bindgen should pass to the C/C++ parser')

@depends('BINDGEN_CFLAGS')
@checking('bindgen cflags', lambda s: s if s else 'no')
def bindgen_cflags(value):
    if value and len(value):
        return value[0].split()
add_old_configure_assignment('_BINDGEN_CFLAGS', bindgen_cflags)
@depends(c_compiler)
def default_debug_flags(compiler_info):
    # Debug info is ON by default.
    if compiler_info.type in ('msvc', 'clang-cl'):
        return '-Zi'
    return '-g'

option(env='MOZ_DEBUG_FLAGS',
       nargs=1,
       help='Debug compiler flags')

imply_option('--enable-debug-symbols',
             depends_if('--enable-debug')(lambda v: v))

js_option('--enable-debug-symbols',
          nargs='?',
          default=True,
          help='Enable debug symbols using the given compiler flags')

set_config('MOZ_DEBUG_SYMBOLS',
           depends_if('--enable-debug-symbols')(lambda _: True))

@depends('MOZ_DEBUG_FLAGS', '--enable-debug-symbols', default_debug_flags)
def debug_flags(env_debug_flags, enable_debug_flags, default_debug_flags):
    # If MOZ_DEBUG_FLAGS is set, and --enable-debug-symbols is set to a value,
    # --enable-debug-symbols takes precedence. Note, the value of
    # --enable-debug-symbols may be implied by --enable-debug.
    if len(enable_debug_flags):
        return enable_debug_flags[0]
    if env_debug_flags:
        return env_debug_flags[0]
    return default_debug_flags
set_config('MOZ_DEBUG_FLAGS', debug_flags)
add_old_configure_assignment('MOZ_DEBUG_FLAGS', debug_flags)
@depends(c_compiler)
def color_cflags(info):
    # We could test compiling with flags. But why incur the overhead when
    # color support should always be present in a specific toolchain
    # version?
    # Code for auto-adding this flag to compiler invocations needs to
    # determine if an existing flag isn't already present. That is likely
    # using exact string matching on the returned value. So if the return
    # value changes to e.g. "<x>=always", exact string match may fail and
    # multiple color flags could be added. So examine downstream consumers
    # before adding flags to return values.
    if info.type == 'gcc':
        return '-fdiagnostics-color'
    elif info.type == 'clang':
        return '-fcolor-diagnostics'
    else:
        return ''
set_config('COLOR_CFLAGS', color_cflags)
# Some standard library headers (notably bionic on Android) declare standard
# functions (e.g. getchar()) and also #define macros for those standard
# functions. libc++ deals with this by doing something like the following
# (explanatory comments added):
#
# #ifdef FUNC
# // Capture the definition of FUNC.
# inline _LIBCPP_INLINE_VISIBILITY int __libcpp_FUNC(...) { return FUNC(...); }
# #undef FUNC
# // Use a real inline definition.
# inline _LIBCPP_INLINE_VISIBILITY int FUNC(...) { return _libcpp_FUNC(...); }
# #endif
#
# _LIBCPP_INLINE_VISIBILITY is typically defined as:
#
# __attribute__((__visibility__("hidden"), __always_inline__))
#
# Unfortunately, this interacts badly with our system header wrappers, as the:
#
# #pragma GCC visibility push(default)
#
# that they do prior to including the actual system header is treated by the
# compiler as an explicit declaration of visibility on every function declared
# in the header. Therefore, when the libc++ code above is encountered, it is
# as though the compiler has effectively seen:
#
# int FUNC(...) __attribute__((__visibility__("default")));
# int FUNC(...) __attribute__((__visibility__("hidden")));
#
# and the compiler complains about the mismatched visibility declarations.
#
# However, libc++ will only define _LIBCPP_INLINE_VISIBILITY if there is no
# existing definition. We can therefore define it to the empty string (since
# we are properly managing visibility ourselves) and avoid this whole mess.
# Note that we don't need to do this with gcc, as libc++ detects gcc and
# effectively does the same thing we are doing here.
#
# _LIBCPP_ALWAYS_INLINE needs similar workarounds, since it too declares
# hidden visibility.
@depends(c_compiler, target)
def libcxx_override_visibility(c_compiler, target):
    if c_compiler.type == 'clang' and target.os == 'Android':
        return ''

set_define('_LIBCPP_INLINE_VISIBILITY', libcxx_override_visibility)
set_define('_LIBCPP_INLINE_VISIBILITY_EXCEPT_GCC49',
           libcxx_override_visibility)

set_define('_LIBCPP_ALWAYS_INLINE', libcxx_override_visibility)
set_define('_LIBCPP_ALWAYS_INLINE_EXCEPT_GCC49', libcxx_override_visibility)
@depends(target, check_build_environment)
def visibility_flags(target, env):
    if target.os != 'WINNT':
        if target.kernel == 'Darwin':
            return ('-fvisibility=hidden', '-fvisibility-inlines-hidden')
        return ('-I%s/system_wrappers' % os.path.join(env.dist),
                '-include',
                '%s/config/gcc_hidden.h' % env.topsrcdir)

@depends(target, visibility_flags)
def wrap_system_includes(target, visibility_flags):
    if visibility_flags and target.kernel != 'Darwin':
        return True

set_define('HAVE_VISIBILITY_HIDDEN_ATTRIBUTE',
           depends(visibility_flags)(lambda v: bool(v) or None))
set_define('HAVE_VISIBILITY_ATTRIBUTE',
           depends(visibility_flags)(lambda v: bool(v) or None))
set_config('WRAP_SYSTEM_INCLUDES', wrap_system_includes)
set_config('VISIBILITY_FLAGS', visibility_flags)
@depends(c_compiler, check_build_environment, target)
@imports('multiprocessing')
@imports(_from='__builtin__', _import='min')
def pgo_flags(compiler, build_env, target):
    topobjdir = build_env.topobjdir
    if topobjdir.endswith('/js/src'):
        topobjdir = topobjdir[:-7]

    if compiler.type == 'gcc':
        return namespace(
            gen_cflags=['-fprofile-generate'],
            gen_ldflags=['-fprofile-generate'],
            use_cflags=['-fprofile-use', '-fprofile-correction',
                        '-Wcoverage-mismatch'],
            use_ldflags=['-fprofile-use'],
        )

    if compiler.type in ('clang-cl', 'clang'):
        profdata = os.path.join(topobjdir, 'merged.profdata')
        if compiler.type == 'clang-cl':
            if target.cpu == 'x86_64':
                gen_ldflags = ['clang_rt.profile-x86_64.lib']
            elif target.cpu == 'x86':
                gen_ldflags = ['clang_rt.profile-i386.lib']
            else:
                gen_ldflags = None
        else:
            gen_ldflags = ['-fprofile-instr-generate']

        if gen_ldflags:
            return namespace(
                gen_cflags=['-fprofile-instr-generate'],
                gen_ldflags=gen_ldflags,
                use_cflags=['-fprofile-instr-use=%s' % profdata,
                            '-Wno-error=profile-instr-out-of-date',
                            '-Wno-error=profile-instr-unprofiled'],
                use_ldflags=[],
            )

    if compiler.type == 'msvc':
        num_cores = min(8, multiprocessing.cpu_count())
        cgthreads = '-CGTHREADS:%s' % num_cores

        return namespace(
            gen_cflags=['-GL'],
            gen_ldflags=['-LTCG:PGINSTRUMENT', '-PogoSafeMode', cgthreads],
            # XXX: PGO builds can fail with warnings treated as errors,
            # specifically "no profile data available" appears to be
            # treated as an error sometimes. This might be a consequence
            # of using WARNINGS_AS_ERRORS in some modules, combined
            # with the linker doing most of the work in the whole-program
            # optimization/PGO case. I think it's probably a compiler bug,
            # but we work around it here.
            use_cflags=['-GL', '-wd4624', '-wd4952'],
            # XXX: should be -LTCG:PGOPTIMIZE, but that fails on libxul.
            # Probably also a compiler bug, but what can you do?
            # /d2:-cgsummary prints a summary of what is happening during
            # code generation. How long individual functions are optimized,
            # which functions are optimized, etc.
            use_ldflags=['-LTCG:PGUPDATE', cgthreads, '-d2:-cgsummary'],
        )
set_config('PROFILE_GEN_CFLAGS', pgo_flags.gen_cflags)
set_config('PROFILE_GEN_LDFLAGS', pgo_flags.gen_ldflags)
set_config('PROFILE_USE_CFLAGS', pgo_flags.use_cflags)
set_config('PROFILE_USE_LDFLAGS', pgo_flags.use_ldflags)
llvm_profdata = check_prog('LLVM_PROFDATA', ['llvm-profdata'],
                           allow_missing=True)
add_old_configure_assignment('LLVM_PROFDATA', llvm_profdata)
@depends(c_compiler)
def preprocess_option(compiler):
    # The uses of PREPROCESS_OPTION depend on the spacing for -o/-Fi.
    if compiler.type in ('gcc', 'clang'):
        return '-E -o '
    else:
        return '-P -Fi'
set_config('PREPROCESS_OPTION', preprocess_option)
# We only want to include windows.configure when we are compiling on
# Windows, for Windows.
@depends(target, host)
def is_windows(target, host):
    return host.kernel == 'WINNT' and target.kernel == 'WINNT'

include('windows.configure', when=is_windows)

# Shader Compiler for Windows (and MinGW Cross Compile)
# ==============================================================
fxc = check_prog('FXC', ('fxc.exe', 'fxc2.exe'), when=depends(target)
                 (lambda t: t.kernel == 'WINNT'))
wine = check_prog('WINE', ['wine'], when=depends(target, host)
                  (lambda t, h: t.kernel == 'WINNT' and h.kernel == 'Linux'))
# LTO
# ==============================================================
js_option('--enable-lto',
          nargs='?',
          choices=('full', 'thin'),
          help='Enable LTO')

@depends('--enable-lto', 'MOZ_PGO', c_compiler)
@imports('multiprocessing')
def lto(value, pgo, c_compiler):
    cflags = []
    ldflags = []
    enabled = None

    # MSVC's implementation of PGO implies LTO. Make clang-cl match this.
    if c_compiler.type == 'clang-cl' and pgo and value.origin == 'default':
        value = ['thin']

    if value:
        enabled = True
        if c_compiler.type == 'clang':
            if len(value) and value[0].lower() == 'full':
                cflags.append("-flto")
                ldflags.append("-flto")
            else:
                cflags.append("-flto=thin")
                ldflags.append("-flto=thin")
        elif c_compiler.type == 'clang-cl':
            if len(value) and value[0].lower() == 'full':
                cflags.append("-flto")
            else:
                cflags.append("-flto=thin")
            # With clang-cl, -flto can only be used with -c or -fuse-ld=lld.
            # AC_TRY_LINKs during configure don't have -c, so pass -fuse-ld=lld.
            cflags.append("-fuse-ld=lld")
        else:
            num_cores = multiprocessing.cpu_count()
            cflags.append("-flto")
            cflags.append("-flifetime-dse=1")
            ldflags.append("-flto=%s" % num_cores)
            ldflags.append("-flifetime-dse=1")

    return namespace(
        enabled=enabled,
        cflags=cflags,
        ldflags=ldflags
    )
add_old_configure_assignment('MOZ_LTO', lto.enabled)
set_config('MOZ_LTO', lto.enabled)
set_define('MOZ_LTO', lto.enabled)
set_config('MOZ_LTO_CFLAGS', lto.cflags)
set_config('MOZ_LTO_LDFLAGS', lto.ldflags)
add_old_configure_assignment('MOZ_LTO_CFLAGS', lto.cflags)
add_old_configure_assignment('MOZ_LTO_LDFLAGS', lto.ldflags)
# ASAN
# ==============================================================
js_option('--enable-address-sanitizer', help='Enable Address Sanitizer')
@depends_if('--enable-address-sanitizer')
def asan(value):
    return True
add_old_configure_assignment('MOZ_ASAN', asan)
# Security Hardening
# ==============================================================
option('--enable-hardening', env='MOZ_SECURITY_HARDENING',
       help='Enables security hardening compiler options')
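
# For example, a default optimized Linux gcc/clang build without ASan gets
# flags = ['-U_FORTIFY_SOURCE', '-D_FORTIFY_SOURCE=2'] (and the same in
# js_flags) from the function below; '-fstack-protector-strong' is only added
# when --enable-hardening is passed explicitly.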
@depends('--enable-hardening', '--enable-address-sanitizer',
         '--enable-optimize', c_compiler, target)
def security_hardening_cflags(hardening_flag, asan, optimize, c_compiler, target):
    compiler_is_gccish = c_compiler.type in ('gcc', 'clang')

    flags = []
    js_flags = []

    # FORTIFY_SOURCE ------------------------------------
    # If hardening is explicitly enabled, or not explicitly disabled
    if hardening_flag.origin == "default" or hardening_flag:
        # Require optimization for FORTIFY_SOURCE. See Bug 1417452
        # Also, undefine it before defining it just in case a distro adds it, see Bug 1418398
        if compiler_is_gccish and optimize and not asan:
            # Don't enable FORTIFY_SOURCE on Android on the top-level, but do enable in js/
            if target.os != 'Android':
                flags.append("-U_FORTIFY_SOURCE")
                flags.append("-D_FORTIFY_SOURCE=2")
            js_flags.append("-U_FORTIFY_SOURCE")
            js_flags.append("-D_FORTIFY_SOURCE=2")

    # If ASAN _is_ on, undefine FORTIFY_SOURCE just to be safe
    if asan:
        flags.append("-U_FORTIFY_SOURCE")
        js_flags.append("-U_FORTIFY_SOURCE")

    # fstack-protector ------------------------------------
    # Enable only if --enable-hardening is passed and ASAN is
    # not on as ASAN will catch the crashes for us
    if hardening_flag and compiler_is_gccish and not asan:
        flags.append("-fstack-protector-strong")

    # fno-common -----------------------------------------
    # Do not merge variables for ASAN; can detect some subtle bugs
    if asan:
        # clang-cl does not recognize the flag, it must be passed down to clang
        if c_compiler.type == 'clang-cl':
            flags.append("-Xclang")
        flags.append("-fno-common")

    return namespace(
        flags=flags,
        js_flags=js_flags,
    )
add_old_configure_assignment('MOZ_HARDENING_CFLAGS', security_hardening_cflags.flags)
add_old_configure_assignment('MOZ_HARDENING_CFLAGS_JS', security_hardening_cflags.js_flags)
# Code Coverage
# ==============================================================
js_option('--enable-coverage', env='MOZ_CODE_COVERAGE',
          help='Enable code coverage')

@depends('--enable-coverage')
def code_coverage(value):
    if value:
        return True
set_config('MOZ_CODE_COVERAGE', code_coverage)
set_define('MOZ_CODE_COVERAGE', code_coverage)
# ==============================================================
option(env='RUSTFLAGS',
       nargs=1,
       help='Rust compiler flags')
set_config('RUSTFLAGS', depends('RUSTFLAGS')(lambda flags: flags))
# Rust compiler flags
# ==============================================================
js_option(env='RUSTC_OPT_LEVEL',
          nargs=1,
          help='Rust compiler optimization level (-C opt-level=%s)')

# --enable-release kicks in full optimizations.
imply_option('RUSTC_OPT_LEVEL', '2', when='--enable-release')

@depends('RUSTC_OPT_LEVEL', moz_optimize)
def rustc_opt_level(opt_level_option, moz_optimize):
    if opt_level_option:
        return opt_level_option[0]
    else:
        return '1' if moz_optimize.optimize else '0'
@depends(rustc_opt_level, debug_rust, '--enable-debug-symbols')
def rust_compiler_flags(opt_level, debug_rust, debug_symbols):
    # Cargo currently supports only two interesting profiles for building:
    # development and release. Those map (roughly) to --enable-debug and
    # --disable-debug in Gecko, respectively.
    #
    # But we'd also like to support an additional axis of control for
    # optimization level. Since Cargo only supports 2 profiles, we're in
    # a bit of a bind.
    #
    # Code here derives various compiler options given other configure options.
    # The options defined here effectively override defaults specified in
    # Cargo.toml files.
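    # For example, a --disable-debug build with default optimization and
    # debug symbols ends up passing ['-C', 'opt-level=1', '-C', 'debuginfo=2'].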
    debug_assertions = None
    debug_info = None

    # opt-level=0 implies -C debug-assertions, which may not be desired
    # unless Rust debugging is enabled.
    if opt_level == '0' and not debug_rust:
        debug_assertions = False

    if debug_symbols:
        debug_info = '2'

    opts = []

    if opt_level is not None:
        opts.append('opt-level=%s' % opt_level)
    if debug_assertions is not None:
        opts.append('debug-assertions=%s' %
                    ('yes' if debug_assertions else 'no'))
    if debug_info is not None:
        opts.append('debuginfo=%s' % debug_info)

    flags = []
    for opt in opts:
        flags.extend(['-C', opt])

    return flags
set_config('MOZ_RUST_DEFAULT_FLAGS', rust_compiler_flags)
# Rust incremental compilation
# ==============================================================
@depends(rustc_opt_level, debug_rust, 'MOZ_AUTOMATION', code_coverage)
def cargo_incremental(opt_level, debug_rust, automation, code_coverage):
    """Return a value for the CARGO_INCREMENTAL environment variable."""
    # We never want to use incremental compilation in automation. sccache
    # handles our automation use case much better than incremental compilation
    # would.
    if automation:
        return '0'

    # Coverage instrumentation doesn't play well with incremental compilation
    # https://github.com/rust-lang/rust/issues/50203.
    if code_coverage:
        return '0'

    # Incremental compilation is automatically turned on for debug builds, so
    # we don't need to do anything special here.
    if debug_rust:
        return

    # --enable-release automatically sets -O2 for Rust code, and people can
    # set RUSTC_OPT_LEVEL to 2 or even 3 if they want to profile Rust code.
    # Let's assume that if Rust code is using -O2 or higher, we shouldn't
    # be using incremental compilation, because we'd be imposing a
    # significant runtime cost.
    if opt_level not in ('0', '1'):
        return

    # We're clear to use incremental compilation!
    return '1'
set_config('CARGO_INCREMENTAL', cargo_incremental)
# Linker detection
# ==============================================================
@depends(target)
def is_linker_option_enabled(target):
    if target.kernel not in ('WINNT', 'SunOS'):
        return True

option('--enable-gold',
       env='MOZ_FORCE_GOLD',
       help='Enable GNU Gold Linker when it is not already the default',
       when=is_linker_option_enabled)

imply_option('--enable-linker', 'gold', when='--enable-gold')

js_option('--enable-linker', nargs=1,
          help='Select the linker {bfd, gold, ld64, lld, lld-*}',
          when=is_linker_option_enabled)
@depends('--enable-linker', c_compiler, developer_options, '--enable-gold',
         extra_toolchain_flags, target, when=is_linker_option_enabled)
@checking('for linker', lambda x: x.KIND)
@imports('os')
@imports('shutil')
def select_linker(linker, c_compiler, developer_options, enable_gold,
                  toolchain_flags, target):
    if linker:
        linker = linker[0]
    else:
        linker = None

    def is_valid_linker(linker):
        if target.kernel == 'Darwin':
            valid_linkers = ('ld64', 'lld')
        else:
            valid_linkers = ('bfd', 'gold', 'lld')
        if linker in valid_linkers:
            return True
        if 'lld' in valid_linkers and linker.startswith('lld-'):
            return True
        return False

    if linker and not is_valid_linker(linker):
        # Check that we are trying to use a supported linker
        die('Unsupported linker ' + linker)

    # Check the kind of linker
    version_check = ['-Wl,--version']
    cmd_base = c_compiler.wrapper + [c_compiler.compiler] + c_compiler.flags

    def try_linker(linker):
        # Generate the compiler flag
        if linker == 'ld64':
            linker_flag = ['-fuse-ld=ld']
        elif linker:
            linker_flag = ["-fuse-ld=" + linker]
        else:
            linker_flag = []
        cmd = cmd_base + linker_flag + version_check
        if toolchain_flags:
            cmd += toolchain_flags

        # ld64 doesn't have anything to print out a version. It does print out
        # "ld64: For information on command line options please use 'man ld'."
        # but that would require doing two attempts, one with --version, that
        # would fail, and another with --help.
        # Instead, abuse its LD_PRINT_OPTIONS feature to detect a message
        # specific to it on stderr when it fails to process --version.
        env = dict(os.environ)
        env['LD_PRINT_OPTIONS'] = '1'
        retcode, stdout, stderr = get_cmd_output(*cmd, env=env)
        cmd_output = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if retcode == 1 and 'Logging ld64 options' in stderr:
            kind = 'ld64'
        elif retcode != 0:
            return None
        elif 'GNU ld' in cmd_output:
            # We are using the normal linker
            kind = 'bfd'
        elif 'GNU gold' in cmd_output:
            kind = 'gold'
        elif 'LLD' in cmd_output:
            kind = 'lld'
        else:
            kind = 'unknown'

        return namespace(
            KIND=kind,
            LINKER_FLAG=linker_flag,
        )

    result = try_linker(linker)
    if result is None:
        if linker:
            die("Could not use {} as linker".format(linker))
        die("Failed to find a linker")

    if (linker is None and enable_gold.origin == 'default' and
            developer_options and result.KIND == 'bfd'):
        # try and use lld if available.
        tried = try_linker('lld')
        if tried is None or tried.KIND != 'lld':
            tried = try_linker('gold')
            if tried is None or tried.KIND != 'gold':
                tried = None
        if tried:
            result = tried

    # If an explicit linker was given, error out if what we found is different.
    if linker and not linker.startswith(result.KIND):
        die("Could not use {} as linker".format(linker))

    return result
set_config('LD_IS_BFD', depends(select_linker.KIND)
           (lambda x: x == 'bfd' or None))

add_old_configure_assignment('LINKER_LDFLAGS', select_linker.LINKER_FLAG)

js_option('--enable-clang-plugin', env='ENABLE_CLANG_PLUGIN',
          help="Enable building with the mozilla clang plugin")

add_old_configure_assignment('ENABLE_CLANG_PLUGIN',
                             depends_if('--enable-clang-plugin')(lambda _: True))

js_option('--enable-mozsearch-plugin', env='ENABLE_MOZSEARCH_PLUGIN',
          help="Enable building with the mozsearch indexer plugin")

add_old_configure_assignment('ENABLE_MOZSEARCH_PLUGIN',
                             depends_if('--enable-mozsearch-plugin')(lambda _: True))
# Libstdc++ compatibility hacks
# ==============================================================
#
js_option('--enable-stdcxx-compat', env='MOZ_STDCXX_COMPAT',
          help='Enable compatibility with older libstdc++')

@template
def libstdcxx_version(var, compiler, host_or_target):
    @depends(compiler, host_or_target, when='--enable-stdcxx-compat')
    @imports(_from='mozbuild.configure.libstdcxx', _import='find_version')
    def version(compiler, host_or_target):
        if host_or_target.os == 'Android':
            return None
        result = find_version(
            compiler.wrapper + [compiler.compiler] + compiler.flags)
        if result:
            return str(result)

    set_config(var, version)
    return version

add_gcc_flag(
    '-D_GLIBCXX_USE_CXX11_ABI=0', cxx_compiler,
    when=libstdcxx_version(
        'MOZ_LIBSTDCXX_TARGET_VERSION', cxx_compiler, target))
add_gcc_flag(
    '-D_GLIBCXX_USE_CXX11_ABI=0', host_cxx_compiler,
    when=libstdcxx_version(
        'MOZ_LIBSTDCXX_HOST_VERSION', host_cxx_compiler, host))
@depends(c_compiler.try_compile(flags=['-fsanitize=fuzzer-no-link'],
                                check_msg='whether the C compiler supports -fsanitize=fuzzer-no-link'))
def have_libfuzzer_flag_fuzzer_no_link(value):
    if value:
        return True
set_config('HAVE_LIBFUZZER_FLAG_FUZZER_NO_LINK', have_libfuzzer_flag_fuzzer_no_link)