Remove almost all vestiges of USE_TYPED_ARRAYS.

Bruce Mitchener 2015-04-10 21:21:07 +07:00
Parent a657e8c99a
Commit c0e750b7ca
22 changed files with 917 additions and 1584 deletions

emcc
View File

@ -722,11 +722,6 @@ try:
if is_minus_s_for_emcc(newargs, i):
settings_changes.append(newargs[i+1])
newargs[i] = newargs[i+1] = ''
elif newargs[i].startswith('--typed-arrays'):
assert '=' not in newargs[i], 'Invalid typed arrays parameter (do not use "=")'
settings_changes.append('USE_TYPED_ARRAYS=' + newargs[i+1])
newargs[i] = ''
newargs[i+1] = ''
newargs = [arg for arg in newargs if arg is not '']
if split_js_file:
@ -913,7 +908,8 @@ try:
assert shared.Settings.NAMED_GLOBALS == 0, 'named globals not supported in fastcomp'
assert shared.Settings.PGO == 0, 'pgo not supported in fastcomp'
assert shared.Settings.TARGET_ASMJS_UNKNOWN_EMSCRIPTEN == 1, 'fastcomp requires asmjs-unknown-emscripten'
assert shared.Settings.USE_TYPED_ARRAYS == 2, 'fastcomp assumes ta2'
assert shared.Settings.USE_TYPED_ARRAYS == 2, 'altering USE_TYPED_ARRAYS is not supported'
assert shared.Settings.QUANTUM_SIZE == 4, 'altering the QUANTUM_SIZE is not supported'
assert not split_js_file, '--split-js is deprecated and not supported in fastcomp'
assert shared.Settings.INIT_HEAP == 0, 'HEAP_INIT is not supported in fastcomp (and should never be needed except for debugging)'
assert not shared.Settings.RUNTIME_TYPE_INFO, 'RUNTIME_TYPE_INFO is not supported in fastcomp'
@ -1008,10 +1004,6 @@ try:
else:
raise Exception('unknown llvm target: ' + str(shared.LLVM_TARGET))
if shared.Settings.USE_TYPED_ARRAYS != 2 and llvm_opts > 0:
logging.warning('disabling LLVM optimizations, need typed arrays mode 2 for them')
llvm_opts = 0
if shared.Settings.MAIN_MODULE:
assert not shared.Settings.SIDE_MODULE
shared.Settings.INCLUDE_FULL_LIBRARY = 1
@ -1277,7 +1269,7 @@ try:
if DEBUG: save_intermediate('opt', 'bc')
# If we can LTO, do it before dce, since it opens up dce opportunities
if shared.Building.can_build_standalone() and llvm_lto and llvm_lto != 2 and shared.Building.can_use_unsafe_opts():
if shared.Building.can_build_standalone() and llvm_lto and llvm_lto != 2:
if not shared.Building.can_inline(): link_opts.append('-disable-inlining')
# add a manual internalize with the proper things we need to be kept alive during lto
link_opts += shared.Building.get_safe_internalize() + ['-std-link-opts']
@ -1384,32 +1376,29 @@ try:
js_transform_tempfiles = [final]
if memory_init_file:
if shared.Settings.USE_TYPED_ARRAYS != 2:
if type(memory_init_file) == int: logging.warning('memory init file requires typed arrays mode 2')
else:
memfile = target + '.mem'
shared.try_delete(memfile)
def repl(m):
# handle chunking of the memory initializer
s = m.groups(0)[0]
if len(s) == 0 and not shared.Settings.EMTERPRETIFY: return m.group(0) # emterpreter must have a mem init file; otherwise, don't emit 0-size ones
open(memfile, 'wb').write(''.join(map(lambda x: chr(int(x or '0')), s.split(','))))
if DEBUG:
# Copy into temp dir as well, so can be run there too
shared.safe_copy(memfile, os.path.join(shared.get_emscripten_temp_dir(), os.path.basename(memfile)))
return 'var memoryInitializer = "%s";' % os.path.basename(memfile)
src = re.sub(shared.JS.memory_initializer_pattern, repl, open(final).read(), count=1)
open(final + '.mem.js', 'w').write(src)
final += '.mem.js'
src = None
js_transform_tempfiles[-1] = final # simple text substitution preserves comment line number mappings
memfile = target + '.mem'
shared.try_delete(memfile)
def repl(m):
# handle chunking of the memory initializer
s = m.groups(0)[0]
if len(s) == 0 and not shared.Settings.EMTERPRETIFY: return m.group(0) # emterpreter must have a mem init file; otherwise, don't emit 0-size ones
open(memfile, 'wb').write(''.join(map(lambda x: chr(int(x or '0')), s.split(','))))
if DEBUG:
if os.path.exists(memfile):
save_intermediate('meminit')
logging.debug('wrote memory initialization to %s', memfile)
else:
logging.debug('did not see memory initialization')
elif shared.Settings.USE_TYPED_ARRAYS == 2 and not shared.Settings.MAIN_MODULE and not shared.Settings.SIDE_MODULE and debug_level < 4:
# Copy into temp dir as well, so can be run there too
shared.safe_copy(memfile, os.path.join(shared.get_emscripten_temp_dir(), os.path.basename(memfile)))
return 'var memoryInitializer = "%s";' % os.path.basename(memfile)
src = re.sub(shared.JS.memory_initializer_pattern, repl, open(final).read(), count=1)
open(final + '.mem.js', 'w').write(src)
final += '.mem.js'
src = None
js_transform_tempfiles[-1] = final # simple text substitution preserves comment line number mappings
if DEBUG:
if os.path.exists(memfile):
save_intermediate('meminit')
logging.debug('wrote memory initialization to %s', memfile)
else:
logging.debug('did not see memory initialization')
elif not shared.Settings.MAIN_MODULE and not shared.Settings.SIDE_MODULE and debug_level < 4:
# not writing a binary init, but we can at least optimize them by splitting them up
src = open(final).read()
src = shared.JS.optimize_initializer(src)
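For context on the memory-initializer handling kept above: emcc now always extracts the static-memory bytes from the generated JS into a separate `.mem` file and leaves only a `var memoryInitializer = "..."` reference behind. A rough JavaScript sketch of the conversion the Python `repl` performs (the function and file names here are illustrative, not part of emcc):

```js
// Hedged sketch: turn the comma-separated byte list from the JS output,
// e.g. "104,101,108,108,111,0", into a raw binary .mem file.
var fs = require('fs');

function writeMemInitFile(bytesString, memfilePath) {
  // Empty entries default to 0, mirroring `int(x or '0')` in emcc.
  var bytes = bytesString.split(',').map(function(x) { return parseInt(x || '0', 10); });
  fs.writeFileSync(memfilePath, Buffer.from(bytes));
  // The rewritten JS keeps only a reference to the file name.
  return 'var memoryInitializer = "' + memfilePath + '";';
}
```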

File diff suppressed because it is too large

View File

@ -194,7 +194,6 @@ if (NO_BROWSER) {
// Settings sanity checks
assert(!(USE_TYPED_ARRAYS === 2 && QUANTUM_SIZE !== 4), 'For USE_TYPED_ARRAYS == 2, must have normal QUANTUM_SIZE of 4');
assert(!(!NAMED_GLOBALS && BUILD_AS_SHARED_LIB), 'shared libraries must have named globals');
// Output some info and warnings based on settings

View File

@ -1,124 +0,0 @@
diff --git a/src/analyzer.js b/src/analyzer.js
index 526d024..a5074e1 100644
--- a/src/analyzer.js
+++ b/src/analyzer.js
@@ -462,15 +462,17 @@ function analyzer(data) {
//
// See settings.js for the meaning of QUANTUM_SIZE. The issue we fix here is,
// to correct the .ll assembly code so that things work with QUANTUM_SIZE=1.
//
substrate.addActor('QuantumFixer', {
processItem: function(item) {
this.forwardItem(item, 'LabelAnalyzer');
+dprint('cheez qf!');
if (QUANTUM_SIZE !== 1) return;
+dprint('cheez qf 2!');
// ptrs: the indexes of parameters that are pointers, whose originalType is what we want
// bytes: the index of the 'bytes' parameter
// TODO: malloc, realloc?
var FIXABLE_CALLS = {
'memcpy': { ptrs: [0,1], bytes: 2 },
'memmove': { ptrs: [0,1], bytes: 2 },
@@ -495,16 +497,22 @@ function analyzer(data) {
return [0];
}
item.functions.forEach(function(func) {
function getOriginalType(param) {
function get() {
if (param.intertype === 'value' && !isNumber(param.ident)) {
- if (func.variables[param.ident]) {
- return func.variables[param.ident].originalType || null;
+ var varData = func.variables[param.ident];
+ if (varData) {
+ if (varData.origin == 'load') {
+ return varData.type; // we can trust this
+ }
+// To continue, we need proper type data inside getelementptr, by using type data. not a one liner.
+// TESTING: this makes a difference in fasta
+ return varData.originalType || null;
} else {
return item.globalVariables[param.ident].originalType;
}
} else if (param.intertype === 'bitcast') {
return param.params[0].type;
} else if (param.intertype === 'getelementptr') {
if (param.params[0].type[0] === '[') return param.params[0].type;
@@ -527,18 +535,21 @@ function analyzer(data) {
var fixData = FIXABLE_CALLS[funcIdent];
if (!fixData) return;
var ptrs = fixData.ptrs.map(function(ptr) { return line.params[ptr] });
var bytes = line.params[fixData.bytes].ident;
// Only consider original types. This assumes memcpy always has pointers bitcast to i8*
var originalTypes = ptrs.map(getOriginalType);
+dprint('cheez qf 2! ' + dump(func.variables) + ' :: ' + funcIdent + ',' + line.lineNum + ' : ' + dump(ptrs) + ' ==> ' + dump(originalTypes));
for (var i = 0; i < originalTypes.length; i++) {
if (!originalTypes[i]) return;
}
+dprint('cheez qf 3!');
originalTypes = originalTypes.map(function(type) { return removePointing(type) });
+dprint('zz cheez ' + [funcIdent, originalTypes]);
var sizes = originalTypes.map(function(type) { return getSize(Types.types, type) });
var fatSizes = originalTypes.map(function(type) { return getSize(Types.fatTypes, type, true) });
// The sizes may not be identical, if we copy a descendant class into a parent class. We use
// the smaller size in that case. However, this may also be a bug, it is hard to tell, hence a warning
warn(dedup(sizes).length === 1, 'All sizes should probably be identical here: ' + dump(originalTypes) + ':' + dump(sizes) + ':' +
line.lineNum);
warn(dedup(fatSizes).length === 1, 'All fat sizes should probably be identical here: ' + dump(originalTypes) + ':' + dump(sizes) + ':' +
diff --git a/src/parseTools.js b/src/parseTools.js
index 024026c..a15b89f 100644
--- a/src/parseTools.js
+++ b/src/parseTools.js
@@ -1078,14 +1078,29 @@ function makeSetValues(ptr, pos, value, type, num) {
'}'
}
}
var TYPED_ARRAY_SET_MIN = Infinity; // .set() as memcpy seems to just slow us down
function makeCopyValues(dest, src, num, type, modifier) {
+ return '/* zz1 */ ' + makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + ';' +
+ makeCopyValuesX(dest, src, num, type, modifier) + '; /* zz2*/';
+}
+
+function makeCopyValuesX(dest, src, num, type, modifier) {
function safety(to, from) {
to = to || (dest + '+' + 'mcpi');
from = from || (src + '+' + 'mcpi');
return (SAFE_HEAP ? 'SAFE_HEAP_COPY_HISTORY(' + to + ', ' + from + ')' : '');
}
if (USE_TYPED_ARRAYS <= 1) {
if (isNumber(num)) {
diff --git a/src/utility.js b/src/utility.js
index 1ebbe52..1724c62 100644
--- a/src/utility.js
+++ b/src/utility.js
@@ -15,15 +15,15 @@ function dump(item) {
while (text.length > 80) {
ret += '// ' + text.substr(0,80) + '\n';
text = text.substr(80);
}
return ret + '// ' + text;
}
try {
- return lineify(JSON.stringify(item).substr(0, 80*25));
+ return lineify(JSON.stringify(item).substr(0, 800*25));
} catch(e) {
var ret = [];
for (var i in item) {
var j = item[i];
if (typeof j === 'string' || typeof j === 'number') {
ret.push(i + ': ' + j);
} else {

View File

@ -902,13 +902,11 @@ function intertyper(lines, sidePass, baseLineNums) {
item.type = 'i1';
if (item.params[1].intertype === 'type') item.params[1].intertype = 'value'; // parsed as type, but comparisons have just values there
}
if (USE_TYPED_ARRAYS == 2) {
// Some specific corrections, since 'i64' is special
if (item.op in LLVM.SHIFTS) {
item.params[1].type = 'i32';
} else if (item.op == 'select') {
item.params[0].type = 'i1';
}
// Some specific corrections, since 'i64' is special
if (item.op in LLVM.SHIFTS) {
item.params[1].type = 'i32';
} else if (item.op == 'select') {
item.params[0].type = 'i1';
}
Types.needAnalysis[item.type] = 0;
return item;

View File

@ -147,7 +147,7 @@ function JSify(data, functionsOnly) {
}
// Add current value(s)
var currValue = values[i];
if (USE_TYPED_ARRAYS == 2 && (typeData.fields[i] == 'i64' || (typeData.flatFactor && typeData.fields[0] == 'i64'))) {
if ((typeData.fields[i] == 'i64' || (typeData.flatFactor && typeData.fields[0] == 'i64'))) {
// 'flatten' out the 64-bit value into two 32-bit halves
var parts = parseI64Constant(currValue, true);
ret[index++] = parts[0];
@ -1260,7 +1260,7 @@ function JSify(data, functionsOnly) {
ret = makeVarArgsCleanup(ret);
if (item.assignTo) {
var illegal = USE_TYPED_ARRAYS == 2 && isIllegalType(item.type);
var illegal = isIllegalType(item.type);
var assignTo = illegal ? item.assignTo + '$r' : item.assignTo;
ret = makeVarDef(assignTo) + '=' + ret;
if (ASM_JS) addVariable(assignTo, item.type);
@ -1304,7 +1304,7 @@ function JSify(data, functionsOnly) {
addVariable(item.assignTo + '$0', 'i32');
addVariable(item.assignTo + '$1', 'i32');
}
if (DISABLE_EXCEPTION_CATCHING && !(item.funcData.ident in EXCEPTION_CATCHING_WHITELIST) && USE_TYPED_ARRAYS == 2) {
if (DISABLE_EXCEPTION_CATCHING && !(item.funcData.ident in EXCEPTION_CATCHING_WHITELIST)) {
ret = makeVarDef(item.assignTo) + '$0 = 0; ' + makeVarDef(item.assignTo) + '$1 = 0;';
item.assignTo = null;
if (VERBOSE) warnOnce('landingpad, but exceptions are disabled!');
@ -1312,10 +1312,8 @@ function JSify(data, functionsOnly) {
}
var catchTypeArray = item.catchables.map(finalizeLLVMParameter).map(function(element) { return asmCoercion(element, 'i32') }).join(',');
var ret = asmCoercion('___cxa_find_matching_catch(' + catchTypeArray +')', 'i32');
if (USE_TYPED_ARRAYS == 2) {
ret = makeVarDef(item.assignTo) + '$0 = ' + ret + '; ' + makeVarDef(item.assignTo) + '$1 = tempRet0;';
item.assignTo = null;
}
ret = makeVarDef(item.assignTo) + '$0 = ' + ret + '; ' + makeVarDef(item.assignTo) + '$1 = tempRet0;';
item.assignTo = null;
return ret;
}
function loadHandler(item) {
@ -1352,7 +1350,7 @@ function JSify(data, functionsOnly) {
// and we emulate them using simple JS objects { f1: , f2: , } etc., for speed
var index = item.indexes[0][0].text;
var valueType = Types.types[item.type].fields[index];
if (USE_TYPED_ARRAYS != 2 || valueType != 'i64') {
if (valueType != 'i64') {
return item.ident + '.f' + index;
} else {
var assignTo = item.assignTo;
@ -1764,27 +1762,25 @@ function JSify(data, functionsOnly) {
print('}\n');
}
if (USE_TYPED_ARRAYS == 2) {
if (!BUILD_AS_SHARED_LIB && !SIDE_MODULE) {
print('var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);\n');
print('assert(tempDoublePtr % 8 == 0);\n');
print('function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much\n');
print(' HEAP8[tempDoublePtr] = HEAP8[ptr];\n');
print(' HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];\n');
print(' HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];\n');
print(' HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];\n');
print('}\n');
print('function copyTempDouble(ptr) {\n');
print(' HEAP8[tempDoublePtr] = HEAP8[ptr];\n');
print(' HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];\n');
print(' HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];\n');
print(' HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];\n');
print(' HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];\n');
print(' HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];\n');
print(' HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];\n');
print(' HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];\n');
print('}\n');
}
if (!BUILD_AS_SHARED_LIB && !SIDE_MODULE) {
print('var tempDoublePtr = Runtime.alignMemory(allocate(12, "i8", ALLOC_STATIC), 8);\n');
print('assert(tempDoublePtr % 8 == 0);\n');
print('function copyTempFloat(ptr) { // functions, because inlining this code increases code size too much\n');
print(' HEAP8[tempDoublePtr] = HEAP8[ptr];\n');
print(' HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];\n');
print(' HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];\n');
print(' HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];\n');
print('}\n');
print('function copyTempDouble(ptr) {\n');
print(' HEAP8[tempDoublePtr] = HEAP8[ptr];\n');
print(' HEAP8[tempDoublePtr+1] = HEAP8[ptr+1];\n');
print(' HEAP8[tempDoublePtr+2] = HEAP8[ptr+2];\n');
print(' HEAP8[tempDoublePtr+3] = HEAP8[ptr+3];\n');
print(' HEAP8[tempDoublePtr+4] = HEAP8[ptr+4];\n');
print(' HEAP8[tempDoublePtr+5] = HEAP8[ptr+5];\n');
print(' HEAP8[tempDoublePtr+6] = HEAP8[ptr+6];\n');
print(' HEAP8[tempDoublePtr+7] = HEAP8[ptr+7];\n');
print('}\n');
}
}
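The `tempDoublePtr` scratch area emitted above is what the compiler's `makeSetTempDouble`/`makeGetTempDouble` expressions target: an 8-byte-aligned slot whose two i32 halves alias one f64, used to reinterpret bits between integer pairs and doubles. A minimal sketch of that pattern, assuming the usual HEAP32/HEAPF64 views and a valid `tempDoublePtr`:

```js
// Hedged sketch: reinterpret a [low, high] i32 pair as a double via the scratch slot.
function pairToDouble(low, high) {
  HEAP32[tempDoublePtr >> 2] = low;
  HEAP32[(tempDoublePtr + 4) >> 2] = high;
  return HEAPF64[tempDoublePtr >> 3];   // relies on the % 8 == 0 alignment asserted above
}
```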

View File

@ -1008,11 +1008,6 @@ LibraryManager.library = {
}
try {
var slab = {{{ makeGetSlabs('buf', 'i8', true) }}};
#if SAFE_HEAP
#if USE_TYPED_ARRAYS == 0
SAFE_HEAP_FILL_HISTORY(buf, buf+nbyte, 'i8'); // VFS does not use makeSetValues, so we need to do it manually
#endif
#endif
return FS.read(stream, slab, buf, nbyte, offset);
} catch (e) {
FS.handleFSError(e);
@ -1037,11 +1032,6 @@ LibraryManager.library = {
try {
var slab = {{{ makeGetSlabs('buf', 'i8', true) }}};
#if SAFE_HEAP
#if USE_TYPED_ARRAYS == 0
SAFE_HEAP_FILL_HISTORY(buf, buf+nbyte, 'i8'); // VFS does not use makeSetValues, so we need to do it manually
#endif
#endif
return FS.read(stream, slab, buf, nbyte);
} catch (e) {
FS.handleFSError(e);
@ -1143,11 +1133,6 @@ LibraryManager.library = {
}
try {
var slab = {{{ makeGetSlabs('buf', 'i8', true) }}};
#if SAFE_HEAP
#if USE_TYPED_ARRAYS == 0
SAFE_HEAP_FILL_HISTORY(buf, buf+nbyte, 'i8'); // VFS does not use makeSetValues, so we need to do it manually
#endif
#endif
return FS.write(stream, slab, buf, nbyte, offset);
} catch (e) {
FS.handleFSError(e);
@ -1172,11 +1157,6 @@ LibraryManager.library = {
try {
var slab = {{{ makeGetSlabs('buf', 'i8', true) }}};
#if SAFE_HEAP
#if USE_TYPED_ARRAYS == 0
SAFE_HEAP_FILL_HISTORY(buf, buf+nbyte, 'i8'); // VFS does not use makeSetValues, so we need to do it manually
#endif
#endif
return FS.write(stream, slab, buf, nbyte);
} catch (e) {
FS.handleFSError(e);
@ -2645,17 +2625,11 @@ LibraryManager.library = {
abs: 'Math_abs',
labs: 'Math_abs',
#if USE_TYPED_ARRAYS == 2
llabs__deps: [function() { Types.preciseI64MathUsed = 1 }],
llabs: function(lo, hi) {
i64Math.abs(lo, hi);
{{{ makeStructuralReturn([makeGetTempDouble(0, 'i32'), makeGetTempDouble(1, 'i32')]) }}};
},
#else
llabs: function(lo, hi) {
throw 'unsupported llabs';
},
#endif
exit__deps: ['_exit'],
exit: function(status) {
@ -2766,15 +2740,12 @@ LibraryManager.library = {
___setErrNo(ERRNO_CODES.ERANGE);
}
#if USE_TYPED_ARRAYS == 2
if (bits == 64) {
{{{ makeStructuralReturn(splitI64('ret')) }}};
}
#endif
return ret;
},
#if USE_TYPED_ARRAYS == 2
_parseInt64__deps: ['isspace', '__setErrNo', '$ERRNO_CODES', function() { Types.preciseI64MathUsed = 1 }],
_parseInt64: function(str, endptr, base, min, max, unsign) {
var isNegative = false;
@ -2845,7 +2816,6 @@ LibraryManager.library = {
{{{ makeStructuralReturn([makeGetTempDouble(0, 'i32'), makeGetTempDouble(1, 'i32')]) }}};
},
#endif
environ__deps: ['$ENV'],
environ: 'allocate(1, "i32*", ALLOC_STATIC)',
__environ__deps: ['environ'],
@ -3048,20 +3018,9 @@ LibraryManager.library = {
memcpy__sig: 'iiii',
memcpy__deps: ['emscripten_memcpy_big'],
memcpy: function(dest, src, num) {
#if USE_TYPED_ARRAYS == 0
{{{ makeCopyValues('dest', 'src', 'num', 'null') }}};
return num;
#endif
#if USE_TYPED_ARRAYS == 1
{{{ makeCopyValues('dest', 'src', 'num', 'null') }}};
return num;
#endif
dest = dest|0; src = src|0; num = num|0;
var ret = 0;
#if USE_TYPED_ARRAYS
if ((num|0) >= 4096) return _emscripten_memcpy_big(dest|0, src|0, num|0)|0;
#endif
ret = dest|0;
if ((dest&3) == (src&3)) {
while (dest & 3) {
@ -3126,7 +3085,6 @@ LibraryManager.library = {
memset__sig: 'iiii',
memset__asm: true,
memset: function(ptr, value, num) {
#if USE_TYPED_ARRAYS == 2
ptr = ptr|0; value = value|0; num = num|0;
var stop = 0, value4 = 0, stop4 = 0, unaligned = 0;
stop = (ptr + num)|0;
@ -3153,10 +3111,6 @@ LibraryManager.library = {
ptr = (ptr+1)|0;
}
return (ptr-num)|0;
#else
{{{ makeSetValues('ptr', '0', 'value', 'null', 'num') }}};
return ptr;
#endif
},
llvm_memset_i32: 'memset',
llvm_memset_p0i8_i32: 'memset',
@ -3370,11 +3324,7 @@ LibraryManager.library = {
llvm_bswap_i64: function(l, h) {
var retl = _llvm_bswap_i32(h)>>>0;
var reth = _llvm_bswap_i32(l)>>>0;
#if USE_TYPED_ARRAYS == 2
{{{ makeStructuralReturn(['retl', 'reth']) }}};
#else
throw 'unsupported';
#endif
},
llvm_ctlz_i64__asm: true,
@ -3418,11 +3368,7 @@ LibraryManager.library = {
llvm_cttz_i64: function(l, h) {
var ret = _llvm_cttz_i32(l);
if (ret == 32) ret += _llvm_cttz_i32(h);
#if USE_TYPED_ARRAYS == 2
{{{ makeStructuralReturn(['ret', '0']) }}};
#else
return ret;
#endif
},
llvm_ctpop_i32: function(x) {
@ -3464,12 +3410,6 @@ LibraryManager.library = {
__cxa_guard_release: function() {},
__cxa_guard_abort: function() {},
#if USE_TYPED_ARRAYS != 2
_ZTVN10__cxxabiv119__pointer_type_infoE: [0], // is a pointer
_ZTVN10__cxxabiv117__class_type_infoE: [1], // no inherited classes
_ZTVN10__cxxabiv120__si_class_type_infoE: [2], // yes inherited classes
#endif
$EXCEPTIONS: {
last: 0,
caught: [],
@ -3552,20 +3492,6 @@ LibraryManager.library = {
__cxa_throw__sig: 'viii',
__cxa_throw__deps: ['_ZSt18uncaught_exceptionv', '__cxa_find_matching_catch', '$EXCEPTIONS'],
__cxa_throw: function(ptr, type, destructor) {
#if USE_TYPED_ARRAYS != 2
if (!___cxa_throw.initialized) {
try {
{{{ makeSetValue(makeGlobalUse('__ZTVN10__cxxabiv119__pointer_type_infoE'), '0', '0', 'i32') }}}; // Workaround for libcxxabi integration bug
} catch(e){}
try {
{{{ makeSetValue(makeGlobalUse('__ZTVN10__cxxabiv117__class_type_infoE'), '0', '1', 'i32') }}}; // Workaround for libcxxabi integration bug
} catch(e){}
try {
{{{ makeSetValue(makeGlobalUse('__ZTVN10__cxxabiv120__si_class_type_infoE'), '0', '2', 'i32') }}}; // Workaround for libcxxabi integration bug
} catch(e){}
___cxa_throw.initialized = true;
}
#endif
#if EXCEPTION_DEBUG
Module.printErr('Compiled code throwing an exception, ' + [ptr,type,destructor]);
#endif
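Several of the library functions above (llabs, llvm_bswap_i64, llvm_cttz_i64) hand back 64-bit results through `makeStructuralReturn`, which in the one remaining mode means: return the low 32 bits normally and stash the high 32 bits in `tempRet0`. A self-contained sketch of that convention, with a hand-written byte swap standing in for the real helpers:

```js
// Hedged sketch of the i64 structural-return convention used by the library code above.
var tempRet0 = 0;

function bswap32(x) {
  return ((x & 0xff) << 24) | ((x & 0xff00) << 8) | ((x >>> 8) & 0xff00) | (x >>> 24);
}

function bswap64(low, high) {
  tempRet0 = bswap32(low) >>> 0;  // high word of the result travels out of band
  return bswap32(high) | 0;       // low word is the ordinary return value
}
```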

View File

@ -160,12 +160,10 @@ mergeInto(LibraryManager.library, {
var origArg = currArg;
#endif
var argText;
#if USE_TYPED_ARRAYS == 2
// Flatten i64-1 [low, high] into a (slightly rounded) double
if (argSize == 8) {
currArg = Runtime.makeBigInt(currArg[0], currArg[1], next == {{{ charCode('u') }}});
}
#endif
// Truncate to requested size.
if (argSize <= 4) {
var limit = Math.pow(256, argSize) - 1;
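`Runtime.makeBigInt`, used above to flatten an i64 `[low, high]` pair into one JS number before formatting, is conceptually just low + high * 2^32, with the signedness of the high word chosen by the caller. A hedged sketch (the slight rounding above 2^53 is inherent, as the comment notes):

```js
// Hedged sketch of makeBigInt: combine two 32-bit halves into a (possibly rounded) double.
function makeBigInt(low, high, unsigned) {
  return (low >>> 0) + (unsigned ? (high >>> 0) : (high | 0)) * 4294967296; // 2^32
}
```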

View File

@ -629,10 +629,6 @@ var LibraryGL = {
// Returns the context handle to the new context.
createContext: function(canvas, webGLContextAttributes) {
#if !USE_TYPED_ARRAYS
Module.print('(USE_TYPED_ARRAYS needs to be enabled for WebGL)');
return null;
#endif
if (typeof webGLContextAttributes.majorVersion === 'undefined' && typeof webGLContextAttributes.minorVersion === 'undefined') {
#if USE_WEBGL2
webGLContextAttributes.majorVersion = 2;

View File

@ -89,31 +89,25 @@ mergeInto(LibraryManager.library, {
// Given a file node, returns its file data converted to a regular JS array. You should treat this as read-only.
getFileDataAsRegularArray: function(node) {
#if USE_TYPED_ARRAYS == 2
if (node.contents && node.contents.subarray) {
var arr = [];
for (var i = 0; i < node.usedBytes; ++i) arr.push(node.contents[i]);
return arr; // Returns a copy of the original data.
}
#endif
return node.contents; // No-op, the file contents are already in a JS array. Return as-is.
},
#if USE_TYPED_ARRAYS == 2
// Given a file node, returns its file data converted to a typed array.
getFileDataAsTypedArray: function(node) {
if (!node.contents) return new Uint8Array;
if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes.
return new Uint8Array(node.contents);
},
#endif
// Allocates a new backing store for the given node so that it can fit at least newSize amount of bytes.
// May allocate more, to provide automatic geometric increase and amortized linear performance appending writes.
// Never shrinks the storage.
expandFileStorage: function(node, newCapacity) {
#if USE_TYPED_ARRAYS == 2
#if !MEMFS_APPEND_TO_TYPED_ARRAYS
// If we are asked to expand the size of a file that already exists, revert to using a standard JS array to store the file
// instead of a typed array. This makes resizing the array more flexible because we can just .push() elements at the back to
@ -138,7 +132,6 @@ mergeInto(LibraryManager.library, {
if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage.
return;
}
#endif
// Not using a typed array to back the file storage. Use a standard JS array instead.
if (!node.contents && newCapacity > 0) node.contents = [];
while (node.contents.length < newCapacity) node.contents.push(0);
@ -152,8 +145,6 @@ mergeInto(LibraryManager.library, {
node.usedBytes = 0;
return;
}
#if USE_TYPED_ARRAYS == 2
if (!node.contents || node.contents.subarray) { // Resize a typed array if that is being used as the backing store.
var oldContents = node.contents;
node.contents = new Uint8Array(new ArrayBuffer(newSize)); // Allocate new storage.
@ -163,7 +154,6 @@ mergeInto(LibraryManager.library, {
node.usedBytes = newSize;
return;
}
#endif
// Backing with a JS array.
if (!node.contents) node.contents = [];
if (node.contents.length > newSize) node.contents.length = newSize;
@ -275,12 +265,9 @@ mergeInto(LibraryManager.library, {
if (position >= stream.node.usedBytes) return 0;
var size = Math.min(stream.node.usedBytes - position, length);
assert(size >= 0);
#if USE_TYPED_ARRAYS == 2
if (size > 8 && contents.subarray) { // non-trivial, and typed array
buffer.set(contents.subarray(position, position + size), offset);
} else
#endif
{
} else {
for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
}
return size;
@ -292,7 +279,6 @@ mergeInto(LibraryManager.library, {
var node = stream.node;
node.timestamp = Date.now();
#if USE_TYPED_ARRAYS == 2
if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array?
if (canOwn) { // Can we just reuse the buffer we are given?
#if ASSERTIONS
@ -310,16 +296,15 @@ mergeInto(LibraryManager.library, {
return length;
}
}
#endif
// Appending to an existing file and we need to reallocate, or source data did not come as a typed array.
MEMFS.expandFileStorage(node, position+length);
#if USE_TYPED_ARRAYS == 2
if (node.contents.subarray && buffer.subarray) node.contents.set(buffer.subarray(offset, offset + length), position); // Use typed array write if available.
else
#endif
else {
for (var i = 0; i < length; i++) {
node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not.
}
}
node.usedBytes = Math.max(node.usedBytes, position+length);
return length;
},
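The MEMFS read and write paths above now always prefer bulk typed-array copies (`set` plus `subarray`) and fall back to an element loop only for tiny reads or plain-array backing stores. A condensed sketch of the read fast path, with the node shape assumed from the surrounding code:

```js
// Hedged sketch: copy up to `length` bytes from a MEMFS node into `buffer` at `offset`.
function readFromNode(node, buffer, offset, length, position) {
  if (position >= node.usedBytes) return 0;
  var contents = node.contents;
  var size = Math.min(node.usedBytes - position, length);
  if (size > 8 && contents.subarray) {          // non-trivial size and typed-array backing
    buffer.set(contents.subarray(position, position + size), offset);
  } else {
    for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i];
  }
  return size;
}
```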

View File

@ -1538,14 +1538,7 @@ var LibrarySDL = {
// }
throw 'CopyOnLock is not supported for SDL_LockSurface with SDL_HWPALETTE flag set' + new Error().stack;
} else {
#if USE_TYPED_ARRAYS == 2
HEAPU8.set(surfData.image.data, surfData.buffer);
#else
var num2 = surfData.image.data.length;
for (var i = 0; i < num2; i++) {
{{{ makeSetValue('surfData.buffer', 'i', 'surfData.image.data[i]', 'i8') }}};
}
#endif
HEAPU8.set(surfData.image.data, surfData.buffer);
}
}
@ -1568,7 +1561,6 @@ var LibrarySDL = {
} else if (!surfData.colors) {
var data = surfData.image.data;
var buffer = surfData.buffer;
#if USE_TYPED_ARRAYS == 2
assert(buffer % 4 == 0, 'Invalid buffer offset: ' + buffer);
var src = buffer >> 2;
var dst = 0;
@ -1635,15 +1627,6 @@ var LibrarySDL = {
data32.set(HEAP32.subarray(src, src + data32.length));
}
}
#else
var num = surfData.image.data.length;
for (var i = 0; i < num; i++) {
// We may need to correct signs here. Potentially you can hardcode a write of 255 to alpha, say, and
// the compiler may decide to write -1 in the llvm bitcode...
data[i] = {{{ makeGetValue('buffer', 'i', 'i8', null, true) }}};
if (i % 4 == 3) data[i] = 0xff;
}
#endif
} else {
var width = Module['canvas'].width;
var height = Module['canvas'].height;
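The SDL lock/unlock paths above likewise rely on bulk typed-array copies between the canvas `ImageData` and the heap rather than the per-byte loops the old modes needed. Roughly, and assuming `surfData.buffer` is the heap address of the surface pixels:

```js
// Hedged sketch: move pixel data between a canvas ImageData and the emscripten heap.
function lockSurface(surfData) {
  HEAPU8.set(surfData.image.data, surfData.buffer);             // canvas -> heap
}

function unlockSurfaceNoPalette(surfData) {
  var data32 = new Uint32Array(surfData.image.data.buffer);     // heap -> canvas, 32 bits at a time
  var src = surfData.buffer >> 2;
  data32.set(HEAP32.subarray(src, src + data32.length));
}
```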

View File

@ -104,13 +104,7 @@ mergeInto(LibraryManager.library, {
// socket is closed
return 0;
}
#if USE_TYPED_ARRAYS == 2
buffer.set(msg.buffer, offset);
#else
for (var i = 0; i < size; i++) {
buffer[offset + i] = msg.buffer[i];
}
#endif
return msg.buffer.length;
},
write: function(stream, buffer, offset, length, position /* ignored */) {

View File

@ -19,7 +19,7 @@ var LLVM = {
EXTENDS: set('sext', 'zext'),
COMPS: set('icmp', 'fcmp'),
CONVERSIONS: set('inttoptr', 'ptrtoint', 'uitofp', 'sitofp', 'fptosi', 'fptoui', 'fpext', 'fptrunc'),
INTRINSICS_32: set('_llvm_memcpy_p0i8_p0i8_i64', '_llvm_memmove_p0i8_p0i8_i64', '_llvm_memset_p0i8_i64'), // intrinsics that need args converted to i32 in USE_TYPED_ARRAYS == 2
INTRINSICS_32: set('_llvm_memcpy_p0i8_p0i8_i64', '_llvm_memmove_p0i8_p0i8_i64', '_llvm_memset_p0i8_i64'), // intrinsics that need args converted to i32
MATHOP_IGNORABLES: set('exact', 'nnan', 'ninf', 'nsz', 'arcp', 'fast'),
PARAM_IGNORABLES: set('nocapture', 'readonly', 'readnone'),
};

View File

@ -770,17 +770,12 @@ function makeInlineCalculation(expression, value, tempVar) {
// Makes a proper runtime value for a 64-bit value from low and high i32s. low and high are assumed to be unsigned.
function makeI64(low, high) {
high = high || '0';
if (USE_TYPED_ARRAYS == 2) {
return '[' + makeSignOp(low, 'i32', 'un', 1, 1) + ',' + makeSignOp(high, 'i32', 'un', 1, 1) + ']';
} else {
if (high) return RuntimeGenerator.makeBigInt(low, high);
return low;
}
return '[' + makeSignOp(low, 'i32', 'un', 1, 1) + ',' + makeSignOp(high, 'i32', 'un', 1, 1) + ']';
}
// XXX Make all i64 parts signed
// Splits a number (an integer in a double, possibly > 32 bits) into an USE_TYPED_ARRAYS == 2 i64 value.
// Splits a number (an integer in a double, possibly > 32 bits) into an i64 value, represented by a low and high i32 pair.
// Will suffer from rounding. mergeI64 does the opposite.
function splitI64(value, floatConversion) {
// general idea:
@ -817,7 +812,6 @@ function splitI64(value, floatConversion) {
}
}
function mergeI64(value, unsigned) {
assert(USE_TYPED_ARRAYS == 2);
if (legalizedI64s) {
return RuntimeGenerator.makeBigInt(value + '$0', value + '$1', unsigned);
} else {
@ -828,12 +822,10 @@ function mergeI64(value, unsigned) {
// Takes an i64 value and changes it into the [low, high] form used in i64 mode 1. In that
// mode, this is a no-op
function ensureI64_1(value) {
if (USE_TYPED_ARRAYS == 2) return value;
return splitI64(value, 1);
return value;
}
function makeCopyI64(value) {
assert(USE_TYPED_ARRAYS == 2);
return value + '.slice(0)';
}
@ -956,7 +948,7 @@ function parseNumerical(value, type) {
// Hexadecimal double value, as the llvm docs say,
// "The one non-intuitive notation for constants is the hexadecimal form of floating point constants."
value = IEEEUnHex(value);
} else if (USE_TYPED_ARRAYS == 2 && isIllegalType(type)) {
} else if (isIllegalType(type)) {
return value; // do not parseFloat etc., that can lead to loss of precision
} else if (value === 'null') {
// NULL *is* 0, in C/C++. No JS null! (null == 0 is false, etc.)
@ -1035,7 +1027,7 @@ function calcAllocatedSize(type) {
function generateStructTypes(type) {
if (isArray(type)) return type; // already in the form of [type, type,...]
if (Compiletime.isNumberType(type) || isPointerType(type)) {
if (USE_TYPED_ARRAYS == 2 && type == 'i64') {
if (type == 'i64') {
return ['i64', 0, 0, 0, 'i32', 0, 0, 0];
}
return [type].concat(zeros(Runtime.getNativeFieldSize(type)-1));
@ -1054,7 +1046,7 @@ function generateStructTypes(type) {
var type = array ? typeData.fields[0] : typeData.fields[i];
if (!SAFE_HEAP && isPointerType(type)) type = '*'; // do not include unneeded type names without safe heap
if (Compiletime.isNumberType(type) || isPointerType(type)) {
if (USE_TYPED_ARRAYS == 2 && type == 'i64') {
if (type == 'i64') {
ret[index++] = 'i64';
ret[index++] = 0;
ret[index++] = 0;
@ -1157,10 +1149,6 @@ function checkSafeHeap() {
}
function getHeapOffset(offset, type, forceAsm) {
if (USE_TYPED_ARRAYS !== 2) {
return offset;
}
if (Runtime.getNativeFieldSize(type) > 4) {
if (type == 'i64' || TARGET_X86) {
type = 'i32'; // XXX we emulate 64-bit values as 32 in x86, and also in asmjs-unknown-emscripten but only i64, not double
@ -1286,13 +1274,13 @@ function makeGetValue(ptr, pos, type, noNeedFirst, unsigned, ignore, align, noSa
// In double mode 1, in x86 we always assume unaligned because we can't trust that; otherwise in asmjs-unknown-emscripten
// we need this code path if we are not fully aligned.
if (DOUBLE_MODE == 1 && USE_TYPED_ARRAYS == 2 && type == 'double' && (TARGET_X86 || align < 8)) {
if (DOUBLE_MODE == 1 && type == 'double' && (TARGET_X86 || align < 8)) {
return '(' + makeSetTempDouble(0, 'i32', makeGetValue(ptr, pos, 'i32', noNeedFirst, unsigned, ignore, align, noSafe)) + ',' +
makeSetTempDouble(1, 'i32', makeGetValue(ptr, getFastValue(pos, '+', Runtime.getNativeTypeSize('i32')), 'i32', noNeedFirst, unsigned, ignore, align, noSafe)) + ',' +
makeGetTempDouble(0, 'double') + ')';
}
if (USE_TYPED_ARRAYS == 2 && align) {
if (align) {
// Alignment is important here. May need to split this up
var bytes = Runtime.getNativeTypeSize(type);
if (DOUBLE_MODE == 0 && type == 'double') bytes = 4; // we will really only read 4 bytes here
@ -1394,11 +1382,11 @@ function makeSetValue(ptr, pos, value, type, noNeedFirst, ignore, align, noSafe,
return ret.join('; ');
}
if (DOUBLE_MODE == 1 && USE_TYPED_ARRAYS == 2 && type == 'double' && (TARGET_X86 || align < 8)) {
if (DOUBLE_MODE == 1 && type == 'double' && (TARGET_X86 || align < 8)) {
return '(' + makeSetTempDouble(0, 'double', value) + ',' +
makeSetValue(ptr, pos, makeGetTempDouble(0, 'i32'), 'i32', noNeedFirst, ignore, align, noSafe, ',') + ',' +
makeSetValue(ptr, getFastValue(pos, '+', Runtime.getNativeTypeSize('i32')), makeGetTempDouble(1, 'i32'), 'i32', noNeedFirst, ignore, align, noSafe, ',') + ')';
} else if (USE_TYPED_ARRAYS == 2 && type == 'i64') {
} else if (type == 'i64') {
return '(tempI64 = [' + splitI64(value) + '],' +
makeSetValue(ptr, pos, 'tempI64[0]', 'i32', noNeedFirst, ignore, align, noSafe, ',') + ',' +
makeSetValue(ptr, getFastValue(pos, '+', Runtime.getNativeTypeSize('i32')), 'tempI64[1]', 'i32', noNeedFirst, ignore, align, noSafe, ',') + ')';
@ -1406,7 +1394,7 @@ function makeSetValue(ptr, pos, value, type, noNeedFirst, ignore, align, noSafe,
var bits = getBits(type);
var needSplitting = bits > 0 && !isPowerOfTwo(bits); // an unnatural type like i24
if (USE_TYPED_ARRAYS == 2 && (align || needSplitting)) {
if (align || needSplitting) {
// Alignment is important here, or we need to split this up for other reasons.
var bytes = Runtime.getNativeTypeSize(type);
if (DOUBLE_MODE == 0 && type == 'double') bytes = 4; // we will really only read 4 bytes here
@ -1464,37 +1452,29 @@ function makeSetValues(ptr, pos, value, type, num, align) {
return makeSetValue(ptr, getFastValue(pos, '+', i*jump), value$, type);
}).join('; ');
}
if (USE_TYPED_ARRAYS <= 1) {
if (isNumber(num) && parseInt(num) <= UNROLL_LOOP_MAX) {
return unroll(type, num);
}
return 'for (var $$dest = ' + getFastValue(ptr, '+', pos) + ', $$stop = $$dest + ' + num + '; $$dest < $$stop; $$dest++) {\n' +
makeSetValue('$$dest', '0', value, type) + '\n}';
} else { // USE_TYPED_ARRAYS == 2
// If we don't know how to handle this at compile-time, or handling it is best done in a large amount of code, call memset
// TODO: optimize the case of numeric num but non-numeric value
if (!isNumber(num) || !isNumber(value) || (parseInt(num)/align >= UNROLL_LOOP_MAX)) {
return '_memset(' + asmCoercion(getFastValue(ptr, '+', pos), 'i32') + ', ' + asmCoercion(value, 'i32') + ', ' + asmCoercion(num, 'i32') + ')|0';
}
num = parseInt(num);
value = parseInt(value);
if (value < 0) value += 256; // make it unsigned
var values = {
1: value,
2: value | (value << 8),
4: value | (value << 8) | (value << 16) | (value << 24)
};
var ret = [];
[4, 2, 1].forEach(function(possibleAlign) {
if (num == 0) return;
if (align >= possibleAlign) {
ret.push(unroll('i' + (possibleAlign*8), Math.floor(num/possibleAlign), possibleAlign, values[possibleAlign]));
pos = getFastValue(pos, '+', Math.floor(num/possibleAlign)*possibleAlign);
num %= possibleAlign;
}
});
return ret.join('; ');
// If we don't know how to handle this at compile-time, or handling it is best done in a large amount of code, call memset
// TODO: optimize the case of numeric num but non-numeric value
if (!isNumber(num) || !isNumber(value) || (parseInt(num)/align >= UNROLL_LOOP_MAX)) {
return '_memset(' + asmCoercion(getFastValue(ptr, '+', pos), 'i32') + ', ' + asmCoercion(value, 'i32') + ', ' + asmCoercion(num, 'i32') + ')|0';
}
num = parseInt(num);
value = parseInt(value);
if (value < 0) value += 256; // make it unsigned
var values = {
1: value,
2: value | (value << 8),
4: value | (value << 8) | (value << 16) | (value << 24)
};
var ret = [];
[4, 2, 1].forEach(function(possibleAlign) {
if (num == 0) return;
if (align >= possibleAlign) {
ret.push(unroll('i' + (possibleAlign*8), Math.floor(num/possibleAlign), possibleAlign, values[possibleAlign]));
pos = getFastValue(pos, '+', Math.floor(num/possibleAlign)*possibleAlign);
num %= possibleAlign;
}
});
return ret.join('; ');
}
var TYPED_ARRAY_SET_MIN = Infinity; // .set() as memcpy seems to just slow us down
@ -1504,53 +1484,34 @@ function makeCopyValues(dest, src, num, type, modifier, align, sep) {
function unroll(type, num, jump) {
jump = jump || 1;
return range(num).map(function(i) {
if (USE_TYPED_ARRAYS <= 1 && type === 'null') {
// Null is special-cased: We copy over all heaps
return makeGetSlabs(dest, 'null', true).map(function(slab) {
return slab + '[' + getFastValue(dest, '+', i) + ']=' + slab + '[' + getFastValue(src, '+', i) + ']';
}).join(sep) + (SAFE_HEAP ? sep + 'SAFE_HEAP_COPY_HISTORY(' + getFastValue(dest, '+', i) + ', ' + getFastValue(src, '+', i) + ')' : '');
} else {
return makeSetValue(dest, i*jump, makeGetValue(src, i*jump, type), type);
}
return makeSetValue(dest, i*jump, makeGetValue(src, i*jump, type), type);
}).join(sep);
}
if (USE_TYPED_ARRAYS <= 1) {
if (isNumber(num) && parseInt(num) <= UNROLL_LOOP_MAX) {
return unroll(type, num);
}
var oldDest = dest, oldSrc = src;
dest = '$$dest';
src = '$$src';
return 'for (var $$src = ' + oldSrc + ', $$dest = ' + oldDest + ', $$stop = $$src + ' + num + '; $$src < $$stop; $$src++, $$dest++) {\n' +
unroll(type, 1) + ' }';
} else { // USE_TYPED_ARRAYS == 2
// If we don't know how to handle this at compile-time, or handling it is best done in a large amount of code, call memcpy
if (!isNumber(num)) num = stripCorrections(num);
if (!isNumber(align)) align = stripCorrections(align);
if (!isNumber(num) || (parseInt(num)/align >= UNROLL_LOOP_MAX)) {
return '(_memcpy(' + dest + ', ' + src + ', ' + num + ')|0)';
}
num = parseInt(num);
if (ASM_JS) {
dest = stripCorrections(dest); // remove corrections, since we will be correcting after we add anyhow,
src = stripCorrections(src); // and in the heap assignment expression
}
var ret = [];
[4, 2, 1].forEach(function(possibleAlign) {
if (num == 0) return;
if (align >= possibleAlign) {
ret.push(unroll('i' + (possibleAlign*8), Math.floor(num/possibleAlign), possibleAlign));
src = getFastValue(src, '+', Math.floor(num/possibleAlign)*possibleAlign);
dest = getFastValue(dest, '+', Math.floor(num/possibleAlign)*possibleAlign);
num %= possibleAlign;
}
});
return ret.join(sep);
// If we don't know how to handle this at compile-time, or handling it is best done in a large amount of code, call memcpy
if (!isNumber(num)) num = stripCorrections(num);
if (!isNumber(align)) align = stripCorrections(align);
if (!isNumber(num) || (parseInt(num)/align >= UNROLL_LOOP_MAX)) {
return '(_memcpy(' + dest + ', ' + src + ', ' + num + ')|0)';
}
num = parseInt(num);
if (ASM_JS) {
dest = stripCorrections(dest); // remove corrections, since we will be correcting after we add anyhow,
src = stripCorrections(src); // and in the heap assignment expression
}
var ret = [];
[4, 2, 1].forEach(function(possibleAlign) {
if (num == 0) return;
if (align >= possibleAlign) {
ret.push(unroll('i' + (possibleAlign*8), Math.floor(num/possibleAlign), possibleAlign));
src = getFastValue(src, '+', Math.floor(num/possibleAlign)*possibleAlign);
dest = getFastValue(dest, '+', Math.floor(num/possibleAlign)*possibleAlign);
num %= possibleAlign;
}
});
return ret.join(sep);
}
function makeHEAPView(which, start, end) {
// Assumes USE_TYPED_ARRAYS == 2
var size = parseInt(which.replace('U', '').replace('F', ''))/8;
var mod = size == 1 ? '' : ('>>' + log2(size));
return 'HEAP' + which + '.subarray((' + start + ')' + mod + ',(' + end + ')' + mod + ')';
@ -1724,7 +1685,7 @@ function writeInt8s(slab, i, value, type) {
function makePointer(slab, pos, allocator, type, ptr, finalMemoryInitialization) {
assert(type, 'makePointer requires type info');
if (typeof slab == 'string' && (slab.substr(0, 4) === 'HEAP' || (USE_TYPED_ARRAYS == 1 && slab in IHEAP_FHEAP))) return pos;
if (typeof slab == 'string' && (slab.substr(0, 4) === 'HEAP')) return pos;
var types = generateStructTypes(type);
if (typeof slab == 'object') {
for (var i = 0; i < slab.length; i++) {
@ -1737,42 +1698,17 @@ function makePointer(slab, pos, allocator, type, ptr, finalMemoryInitialization)
}
}
// compress type info and data if possible
if (USE_TYPED_ARRAYS != 2) {
var de;
try {
// compress all-zeros into a number (which will become zeros(..)).
// note that we cannot always eval the slab, e.g., if it contains ident,0,0 etc. In that case, no compression TODO: ensure we get arrays here, not str
var evaled = typeof slab === 'string' ? eval(slab) : slab;
de = dedup(evaled);
if (de.length === 1 && de[0] == 0) {
slab = types.length;
}
// TODO: if not all zeros, at least filter out items with type === 0. requires cleverness to know how to skip at runtime though. also
// be careful of structure padding
} catch(e){}
de = dedup(types);
if (de.length === 1) {
types = de[0];
} else if (de.length === 2 && typeof slab === 'number') {
// If slab is all zeros, we can compress types even if we have i32,0,0,0,i32,0,0,0 etc. - we do not need the zeros
de = de.filter(function(x) { return x !== 0 });
if (de.length === 1) {
types = de[0];
}
}
} else { // USE_TYPED_ARRAYS == 2
if (!finalMemoryInitialization) {
// XXX This heavily assumes the target endianness is the same as our current endianness! XXX
var i = 0;
while (i < slab.length) {
var currType = types[i];
if (!currType) { i++; continue }
i += writeInt8s(slab, i, slab[i], currType);
}
types = 'i8';
if (!finalMemoryInitialization) {
// XXX This heavily assumes the target endianness is the same as our current endianness! XXX
var i = 0;
while (i < slab.length) {
var currType = types[i];
if (!currType) { i++; continue }
i += writeInt8s(slab, i, slab[i], currType);
}
types = 'i8';
}
if (allocator == 'ALLOC_NONE' && USE_TYPED_ARRAYS == 2) {
if (allocator == 'ALLOC_NONE') {
if (!finalMemoryInitialization) {
// writing out into memory, without a normal allocation. We put all of these into a single big chunk.
assert(typeof slab == 'object');
@ -1800,37 +1736,20 @@ function makePointer(slab, pos, allocator, type, ptr, finalMemoryInitialization)
function makeGetSlabs(ptr, type, allowMultiple, unsigned) {
assert(type);
if (!USE_TYPED_ARRAYS) {
return ['HEAP'];
} else if (USE_TYPED_ARRAYS == 1) {
if (type in Compiletime.FLOAT_TYPES || type === 'int64') { // XXX should be i64, no?
return ['FHEAP']; // If USE_FHEAP is false, will fail at runtime. At compiletime we do need it for library stuff.
} else if (type in Compiletime.INT_TYPES || isPointerType(type)) {
return [unsigned ? 'IHEAPU' : 'IHEAP'];
} else {
assert(allowMultiple, 'Unknown slab type and !allowMultiple: ' + type);
if (USE_FHEAP) {
return ['IHEAP', 'FHEAP']; // unknown, so assign to both typed arrays
} else {
return ['IHEAP'];
}
if (isPointerType(type)) type = 'i32'; // Hardcoded 32-bit
switch(type) {
case 'i1': case 'i8': return [unsigned ? 'HEAPU8' : 'HEAP8']; break;
case 'i16': return [unsigned ? 'HEAPU16' : 'HEAP16']; break;
case '<4 x i32>':
case 'i32': case 'i64': return [unsigned ? 'HEAPU32' : 'HEAP32']; break;
case 'double': {
if (TARGET_ASMJS_UNKNOWN_EMSCRIPTEN) return ['HEAPF64']; // in asmjs-unknown-emscripten, we do have the ability to assume 64-bit alignment
// otherwise, fall through to float
}
} else { // USE_TYPED_ARRAYS == 2)
if (isPointerType(type)) type = 'i32'; // Hardcoded 32-bit
switch(type) {
case 'i1': case 'i8': return [unsigned ? 'HEAPU8' : 'HEAP8']; break;
case 'i16': return [unsigned ? 'HEAPU16' : 'HEAP16']; break;
case '<4 x i32>':
case 'i32': case 'i64': return [unsigned ? 'HEAPU32' : 'HEAP32']; break;
case 'double': {
if (TARGET_ASMJS_UNKNOWN_EMSCRIPTEN) return ['HEAPF64']; // in asmjs-unknown-emscripten, we do have the ability to assume 64-bit alignment
// otherwise, fall through to float
}
case '<4 x float>':
case 'float': return ['HEAPF32'];
default: {
throw 'what, exactly, can we do for unknown types in TA2?! ' + [new Error().stack, ptr, type, allowMultiple, unsigned];
}
case '<4 x float>':
case 'float': return ['HEAPF32'];
default: {
throw 'what, exactly, can we do for unknown types in TA2?! ' + [new Error().stack, ptr, type, allowMultiple, unsigned];
}
}
return [];
@ -1889,10 +1808,8 @@ function finalizeLLVMFunctionCall(item, noIndexizeFunctions) {
function getGetElementPtrIndexes(item) {
var type = item.params[0].type;
if (USE_TYPED_ARRAYS == 2) {
// GEP indexes are marked as i64s, but they are just numbers to us
item.params.forEach(function(param) { param.type = 'i32' });
}
// GEP indexes are marked as i64s, but they are just numbers to us
item.params.forEach(function(param) { param.type = 'i32' });
item.params = item.params.map(finalizeLLVMParameter);
var ident = item.params[0];
@ -1966,35 +1883,20 @@ function handleOverflow(text, bits) {
}
function makeLLVMStruct(values) {
if (USE_TYPED_ARRAYS == 2) {
return 'DEPRECATED' + (new Error().stack) + 'XXX';
} else {
return '{ ' + values.map(function(value, i) { return 'f' + i + ': ' + value }).join(', ') + ' }'
}
return 'DEPRECATED' + (new Error().stack) + 'XXX';
}
function makeStructuralReturn(values, inAsm) {
if (USE_TYPED_ARRAYS == 2) {
var i = -1;
return 'return ' + asmCoercion(values.slice(1).map(function(value) {
i++;
return ASM_JS ? (inAsm ? 'tempRet' + i + ' = ' + value : 'asm["setTempRet' + i + '"](' + value + ')')
: 'tempRet' + i + ' = ' + value;
}).concat([values[0]]).join(','), 'i32');
} else {
var i = 0;
return 'return { ' + values.map(function(value) {
return 'f' + (i++) + ': ' + value;
}).join(', ') + ' }';
}
var i = -1;
return 'return ' + asmCoercion(values.slice(1).map(function(value) {
i++;
return ASM_JS ? (inAsm ? 'tempRet' + i + ' = ' + value : 'asm["setTempRet' + i + '"](' + value + ')')
: 'tempRet' + i + ' = ' + value;
}).concat([values[0]]).join(','), 'i32');
}
function makeStructuralAccess(ident, i) {
if (USE_TYPED_ARRAYS == 2) {
return ident + '$' + i;
} else {
return ident + '.f' + i;
}
return ident + '$' + i;
}
function makeThrow(what) {
@ -2023,7 +1925,7 @@ function finalizeLLVMParameter(param, noIndexizeFunctions) {
if (ret in Variables.globals) {
ret = makeGlobalUse(ret);
}
if (param.type == 'i64' && USE_TYPED_ARRAYS == 2) {
if (param.type == 'i64') {
ret = parseI64Constant(ret);
}
ret = parseNumerical(ret, param.type);
@ -2058,7 +1960,7 @@ function makeComparison(a, op, b, type) {
}
function makeSignOp(value, type, op, force, ignore) {
if (USE_TYPED_ARRAYS == 2 && type == 'i64') {
if (type == 'i64') {
return value; // these are always assumed to be two 32-bit unsigneds.
}
if (isPointerType(type)) type = 'i32'; // Pointers are treated as 32-bit ints
@ -2170,7 +2072,7 @@ function isSignedOp(op, variant) {
return op in SIGNED_OP || (variant && variant[0] == 's');
}
var legalizedI64s = USE_TYPED_ARRAYS == 2; // We do not legalize globals, but do legalize function lines. This will be true in the latter case
var legalizedI64s = true; // We do not legalize globals, but do legalize function lines. This will be true in the latter case
function processMathop(item) {
var op = item.op;
@ -2210,7 +2112,7 @@ function processMathop(item) {
return makeInlineCalculation('VALUE-VALUE%1', value, 'tempBigIntI');
}
if ((type == 'i64' || paramTypes[0] == 'i64' || paramTypes[1] == 'i64' || idents[1] == '(i64)' || rawBits > 32) && USE_TYPED_ARRAYS == 2) {
if ((type == 'i64' || paramTypes[0] == 'i64' || paramTypes[1] == 'i64' || idents[1] == '(i64)' || rawBits > 32)) {
// this code assumes i64 for the most part
if (ASSERTIONS && rawBits > 1 && rawBits < 64) {
warnOnce('processMathop processing illegal non-i64 value');
@ -2350,7 +2252,6 @@ function processMathop(item) {
}
case 'bitcast': {
// Pointers are not 64-bit, so there is really only one possible type of bitcast here, int to float or vice versa
assert(USE_TYPED_ARRAYS == 2, 'Can only bitcast ints <-> floats with typed arrays mode 2');
var inType = item.params[0].type;
var outType = item.type;
if (inType in Compiletime.INT_TYPES && outType in Compiletime.FLOAT_TYPES) {
@ -2366,7 +2267,7 @@ function processMathop(item) {
return '(' + makeSetTempDouble(0, 'double', idents[0]) + ',[' + makeGetTempDouble(0, 'i32') + ',' + makeGetTempDouble(1, 'i32') + '])';
}
} else {
throw 'Invalid USE_TYPED_ARRAYS == 2 bitcast: ' + dump(item) + ' : ' + item.params[0].type;
throw 'Invalid bitcast: ' + dump(item) + ' : ' + item.params[0].type;
}
}
default: throw 'Unsupported i64 mode 1 op: ' + item.op + ' : ' + dump(item);
@ -2541,7 +2442,6 @@ function processMathop(item) {
var outType = item.type;
if ((inType in Compiletime.INT_TYPES && outType in Compiletime.FLOAT_TYPES) ||
(inType in Compiletime.FLOAT_TYPES && outType in Compiletime.INT_TYPES)) {
assert(USE_TYPED_ARRAYS == 2, 'Can only bitcast ints <-> floats with typed arrays mode 2');
if (inType in Compiletime.INT_TYPES) {
return '(' + makeSetTempDouble(0, 'i32', idents[0]) + ',' + makeGetTempDouble(0, 'float') + ')';
} else {
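With only i64 mode 1 left, `splitI64` and the i64 branch of `makeSetValue` above always represent a 64-bit value as two 32-bit heap slots. A runtime-level sketch of what an expanded `makeSetValue(ptr, 0, value, 'i64')` boils down to (HEAP32 assumed in scope; precision is lost above 2^53, as the comment warns):

```js
// Hedged sketch of the i64 split-and-store pattern generated by makeSetValue.
function splitI64(value) {
  var low = value >>> 0;                                    // low 32 bits
  var high = Math.floor((value - low) / 4294967296) >>> 0;  // high 32 bits
  return [low, high];
}

function storeI64(ptr, value) {
  var tempI64 = splitI64(value);
  HEAP32[ptr >> 2] = tempI64[0];
  HEAP32[(ptr + 4) >> 2] = tempI64[1];
}
```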

View File

@ -9,25 +9,17 @@ if (memoryInitializer) {
}
if (ENVIRONMENT_IS_NODE || ENVIRONMENT_IS_SHELL) {
var data = Module['readBinary'](memoryInitializer);
#if USE_TYPED_ARRAYS == 2
HEAPU8.set(data, STATIC_BASE);
#else
allocate(data, 'i8', ALLOC_NONE, STATIC_BASE);
#endif
} else {
addRunDependency('memory initializer');
var applyMemoryInitializer = function(data) {
if (data.byteLength) data = new Uint8Array(data);
#if USE_TYPED_ARRAYS == 2
#if ASSERTIONS
for (var i = 0; i < data.length; i++) {
assert(HEAPU8[STATIC_BASE + i] === 0, "area for memory initializer should not have been touched before it's loaded");
}
#endif
HEAPU8.set(data, STATIC_BASE);
#else
allocate(data, 'i8', ALLOC_NONE, STATIC_BASE);
#endif
removeRunDependency('memory initializer');
}
var request = Module['memoryInitializerRequest'];

View File

@ -46,51 +46,12 @@ function SAFE_HEAP_ACCESS(dest, type, store, ignore, storeValue) {
if (dest <= 0) abort('segmentation fault ' + (store ? ('storing value ' + storeValue) : 'loading') + ' type ' + type + ' at address ' + dest);
#if USE_TYPED_ARRAYS
// When using typed arrays, reads over the top of TOTAL_MEMORY will fail silently, so we must
// correct that by growing TOTAL_MEMORY as needed. Without typed arrays, memory is a normal
// JS array so it will work (potentially slowly, depending on the engine).
if (!ignore && dest >= Math.max(DYNAMICTOP, STATICTOP)) abort('segmentation fault ' + (store ? ('storing value ' + storeValue) : 'loading') + ' type ' + type + ' at address ' + dest + '. Heap ends at address ' + Math.max(DYNAMICTOP, STATICTOP));
assert(ignore || DYNAMICTOP <= TOTAL_MEMORY);
#endif
#if USE_TYPED_ARRAYS == 2
return; // It is legitimate to violate the load-store assumption in this case
#endif
if (type && type.charAt(type.length-1) == '*') type = 'i32'; // pointers are ints, for our purposes here
// Note that this will pass even with unions: You can store X, load X, then store Y and load Y.
// You cannot, however, do the nonportable act of store X and load Y!
if (store) {
HEAP_HISTORY[dest] = ignore ? null : type;
} else {
#if USE_TYPED_ARRAYS == 0
if (!HEAP[dest] && HEAP[dest] !== 0 && HEAP[dest] !== false && !ignore) { // false can be the result of a mathop comparator
var error = true;
try {
if (HEAP[dest].toString() === 'NaN') error = false; // NaN is acceptable, as a double value
} catch(e){}
if (error) throw('Warning: Reading an invalid value at ' + dest + ' :: ' + stackTrace() + '\n');
}
#endif
if (type === null) return;
var history = HEAP_HISTORY[dest];
if (history === null) return;
if (!ignore)
assert(history, 'Must have a history for a safe heap load! ' + dest + ':' + type); // Warning - bit fields in C structs cause loads+stores for each store, so
// they will show up here...
// assert((history && history[0]) /* || HEAP[dest] === 0 */, "Loading from where there was no store! " + dest + ',' + HEAP[dest] + ',' + type + ', \n\n' + stackTrace() + '\n');
// if (history[0].type !== type) {
if (history !== type && !ignore) {
Module.print('Load-store consistency assumption failure! ' + dest);
Module.print('\n');
Module.print(JSON.stringify(history));
Module.print('\n');
Module.print('LOAD: ' + type + ', ' + stackTrace());
Module.print('\n');
SAFE_HEAP_ERRORS++;
assert(SAFE_HEAP_ERRORS <= ACCEPTABLE_SAFE_HEAP_ERRORS, 'Load-store consistency assumption failure!');
}
}
}
function SAFE_HEAP_STORE(dest, value, type, ignore) {
@ -109,7 +70,6 @@ function SAFE_HEAP_STORE(dest, value, type, ignore) {
throw "Bad store!" + dest;
}
#if USE_TYPED_ARRAYS == 2
// Check alignment
switch(type) {
case 'i16': assert(dest % 2 == 0); break;
@ -122,7 +82,6 @@ function SAFE_HEAP_STORE(dest, value, type, ignore) {
case 'double': assert(dest % 4 == 0); break;
#endif
}
#endif
setValue(dest, value, type, 1);
}
@ -134,7 +93,6 @@ function SAFE_HEAP_LOAD(dest, type, unsigned, ignore) {
Module.print('SAFE_HEAP load: ' + [dest, type, getValue(dest, type, 1), ignore]);
#endif
#if USE_TYPED_ARRAYS == 2
// Check alignment
switch(type) {
case 'i16': assert(dest % 2 == 0); break;
@ -147,7 +105,6 @@ function SAFE_HEAP_LOAD(dest, type, unsigned, ignore) {
case 'double': assert(dest % 4 == 0); break;
#endif
}
#endif
var ret = getValue(dest, type, 1);
if (unsigned) ret = unSign(ret, parseInt(type.substr(1)), 1);
@ -304,10 +261,8 @@ var undef = 0;
// tempInt is used for 32-bit signed values or smaller. tempBigInt is used
// for 32-bit unsigned values or more than 32 bits. TODO: audit all uses of tempInt
var tempValue, tempInt, tempBigInt, tempInt2, tempBigInt2, tempPair, tempBigIntI, tempBigIntR, tempBigIntS, tempBigIntP, tempBigIntD, tempDouble, tempFloat;
#if USE_TYPED_ARRAYS == 2
var tempI64, tempI64b;
var tempRet0, tempRet1, tempRet2, tempRet3, tempRet4, tempRet5, tempRet6, tempRet7, tempRet8, tempRet9;
#endif
function assert(condition, text) {
if (!condition) {
@ -598,13 +553,11 @@ function allocate(slab, types, allocator, ptr) {
if (zeroinit) {
var ptr = ret, stop;
#if USE_TYPED_ARRAYS == 2
assert((ret & 3) == 0);
stop = ret + (size & ~3);
for (; ptr < stop; ptr += 4) {
{{{ makeSetValue('ptr', '0', '0', 'i32', null, true) }}};
}
#endif
stop = ret + size;
while (ptr < stop) {
{{{ makeSetValue('ptr++', '0', '0', 'i8', null, true) }}};
@ -612,7 +565,6 @@ function allocate(slab, types, allocator, ptr) {
return ret;
}
#if USE_TYPED_ARRAYS == 2
if (singleType === 'i8') {
if (slab.subarray || slab.slice) {
HEAPU8.set(slab, ret);
@ -621,7 +573,6 @@ function allocate(slab, types, allocator, ptr) {
}
return ret;
}
#endif
var i = 0, type, typeSize, previousType;
while (i < size) {
@ -640,9 +591,7 @@ function allocate(slab, types, allocator, ptr) {
assert(type, 'Must know what type to store in allocate!');
#endif
#if USE_TYPED_ARRAYS == 2
if (type == 'i64') type = 'i32'; // special case: we have one i32 here, and one i32 later
#endif
setValue(ret+i, curr, type);
@ -680,7 +629,6 @@ function Pointer_stringify(ptr, /* optional */ length) {
var ret = '';
if (hasUtf < 128) {
#if USE_TYPED_ARRAYS == 2
var MAX_CHUNK = 1024; // split up into chunks, because .apply on a huge string can overflow the stack
var curr;
while (length > 0) {
@ -690,9 +638,6 @@ function Pointer_stringify(ptr, /* optional */ length) {
length -= MAX_CHUNK;
}
return ret;
#else
return Module['AsciiToString'](ptr);
#endif
}
return Module['UTF8ToString'](ptr);
}
@ -1223,21 +1168,12 @@ function alignMemoryPage(x) {
}
var HEAP;
#if USE_TYPED_ARRAYS == 1
var IHEAP, IHEAPU;
#if USE_FHEAP
var FHEAP;
#endif
#endif
#if USE_TYPED_ARRAYS == 2
var HEAP8, HEAPU8, HEAP16, HEAPU16, HEAP32, HEAPU32, HEAPF32, HEAPF64;
#endif
var STATIC_BASE = 0, STATICTOP = 0, staticSealed = false; // static area
var STACK_BASE = 0, STACKTOP = 0, STACK_MAX = 0; // stack area
var DYNAMIC_BASE = 0, DYNAMICTOP = 0; // dynamic area handled by sbrk
#if USE_TYPED_ARRAYS
function enlargeMemory() {
#if ALLOW_MEMORY_GROWTH == 0
abort('Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value ' + TOTAL_MEMORY + ', (2) compile with ALLOW_MEMORY_GROWTH which adjusts the size at runtime but prevents some optimizations, or (3) set Module.TOTAL_MEMORY before the program runs.');
@ -1323,7 +1259,6 @@ function enlargeMemory() {
return true;
#endif
}
#endif
#if ALLOW_MEMORY_GROWTH
var byteLength;
@ -1400,20 +1335,10 @@ if (totalMemory !== TOTAL_MEMORY) {
// Initialize the runtime's memory
#if USE_TYPED_ARRAYS
// check for full engine support (use string 'subarray' to avoid closure compiler confusion)
assert(typeof Int32Array !== 'undefined' && typeof Float64Array !== 'undefined' && !!(new Int32Array(1)['subarray']) && !!(new Int32Array(1)['set']),
'JS engine does not provide full typed array support');
#if USE_TYPED_ARRAYS == 1
HEAP = IHEAP = new Int32Array(TOTAL_MEMORY);
IHEAPU = new Uint32Array(IHEAP.buffer);
#if USE_FHEAP
FHEAP = new Float64Array(TOTAL_MEMORY);
#endif
#endif
#if USE_TYPED_ARRAYS == 2
#if POINTER_MASKING
#if POINTER_MASKING_DYNAMIC
var buffer = new ArrayBuffer(TOTAL_MEMORY + (POINTER_MASKING_ENABLED ? POINTER_MASKING_OVERFLOW : 0));
@ -1423,7 +1348,6 @@ var buffer = new ArrayBuffer(TOTAL_MEMORY + {{{ POINTER_MASKING_OVERFLOW }}});
#else
var buffer = new ArrayBuffer(TOTAL_MEMORY);
#endif // POINTER_MASKING
#endif // USE_TYPED_ARRAYS == 2
HEAP8 = new Int8Array(buffer);
HEAP16 = new Int16Array(buffer);
@ -1437,16 +1361,8 @@ HEAPF64 = new Float64Array(buffer);
// Endianness check (note: assumes compiler arch was little-endian)
HEAP32[0] = 255;
assert(HEAPU8[0] === 255 && HEAPU8[3] === 0, 'Typed arrays 2 must be run on a little-endian system');
#endif // USE_TYPED_ARRAYS
Module['HEAP'] = HEAP;
#if USE_TYPED_ARRAYS == 1
Module['IHEAP'] = IHEAP;
#if USE_FHEAP
Module['FHEAP'] = FHEAP;
#endif
#endif
#if USE_TYPED_ARRAYS == 2
Module['buffer'] = buffer;
Module['HEAP8'] = HEAP8;
Module['HEAP16'] = HEAP16;
@ -1456,7 +1372,6 @@ Module['HEAPU16'] = HEAPU16;
Module['HEAPU32'] = HEAPU32;
Module['HEAPF32'] = HEAPF32;
Module['HEAPF64'] = HEAPF64;
#endif
function callRuntimeCallbacks(callbacks) {
while(callbacks.length > 0) {
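
With the mode guards gone, the heap setup above is the single remaining layout: one ArrayBuffer shared by Int8/Uint8/Int16/Int32/Float32/Float64 views, plus the little-endian sanity check. A minimal sketch of that arrangement outside Emscripten:

    // One backing buffer, several typed views over the same bytes (the "ta2" heap).
    var TOTAL_MEMORY = 64 * 1024;
    var buffer = new ArrayBuffer(TOTAL_MEMORY);
    var HEAP8   = new Int8Array(buffer);
    var HEAPU8  = new Uint8Array(buffer);
    var HEAP16  = new Int16Array(buffer);
    var HEAP32  = new Int32Array(buffer);
    var HEAPF32 = new Float32Array(buffer);
    var HEAPF64 = new Float64Array(buffer);

    // Endianness check, as in preamble.js: write 255 as an i32 and confirm the
    // low byte lands first. Compiled code assumes little-endian byte order.
    HEAP32[0] = 255;
    if (!(HEAPU8[0] === 255 && HEAPU8[3] === 0)) {
      throw new Error('heap views require a little-endian host');
    }

    // Because the views alias the same memory, a store through one view is
    // visible through another.
    HEAPF64[1] = 3.5;                  // bytes 8..15
    console.log(HEAP32[2], HEAP32[3]); // raw bit pattern of 3.5: 0 1074528256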

View file

@ -23,7 +23,7 @@ var RuntimeGenerator = {
// An allocation that lives as long as the current function call
stackAlloc: function(size, sep) {
sep = sep || ';';
var ret = RuntimeGenerator.alloc(size, 'STACK', false, sep, USE_TYPED_ARRAYS != 2 || (isNumber(size) && parseInt(size) % {{{ STACK_ALIGN }}} == 0));
var ret = RuntimeGenerator.alloc(size, 'STACK', false, sep, (isNumber(size) && parseInt(size) % {{{ STACK_ALIGN }}} == 0));
if (ASSERTIONS) {
ret += sep + '(assert(' + asmCoercion('(STACKTOP|0) < (STACK_MAX|0)', 'i32') + ')|0)';
}
@ -34,11 +34,9 @@ var RuntimeGenerator = {
if (initial === 0 && SKIP_STACK_IN_SMALL && !force) return '';
var ret = 'var sp=' + (ASM_JS ? '0;sp=' : '') + 'STACKTOP';
if (initial > 0) ret += ';STACKTOP=(STACKTOP+' + initial + ')|0';
if (USE_TYPED_ARRAYS == 2) {
assert(initial % Runtime.STACK_ALIGN == 0);
if (ASSERTIONS && Runtime.STACK_ALIGN == 4) {
ret += '; (assert(' + asmCoercion('!(STACKTOP&3)', 'i32') + ')|0)';
}
assert(initial % Runtime.STACK_ALIGN == 0);
if (ASSERTIONS && Runtime.STACK_ALIGN == 4) {
ret += '; (assert(' + asmCoercion('!(STACKTOP&3)', 'i32') + ')|0)';
}
if (ASSERTIONS) {
ret += '; (assert(' + asmCoercion('(STACKTOP|0) < (STACK_MAX|0)', 'i32') + ')|0)';
@ -67,7 +65,7 @@ var RuntimeGenerator = {
dynamicAlloc: function(size) {
if (ASSERTIONS) size = '(assert(DYNAMICTOP > 0),' + size + ')'; // dynamic area must be ready
var ret = RuntimeGenerator.alloc(size, 'DYNAMIC', INIT_HEAP);
if (USE_TYPED_ARRAYS) ret += '; if (DYNAMICTOP >= TOTAL_MEMORY) { var success = enlargeMemory(); if (!success) return 0; }'
ret += '; if (DYNAMICTOP >= TOTAL_MEMORY) { var success = enlargeMemory(); if (!success) return 0; }'
return ret;
},
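
The RuntimeGenerator changes drop the non-ta2 escape hatches, so generated stack allocations are always kept STACK_ALIGN-aligned and overflow-checked. A simplified model of what the emitted pattern boils down to; the constants here are illustrative:

    // Simplified model of the generated stack allocation: bump STACKTOP by an
    // aligned size and fail loudly if the stack would overflow.
    var STACK_ALIGN = 8;
    var STACKTOP = 0;
    var STACK_MAX = 1024;

    function stackAlloc(size) {
      var aligned = (size + STACK_ALIGN - 1) & -STACK_ALIGN; // round up
      var ret = STACKTOP;
      STACKTOP = (STACKTOP + aligned) | 0;
      if (STACKTOP >= STACK_MAX) throw new Error('stack overflow');
      return ret;
    }

    console.log(stackAlloc(12)); // 0
    console.log(stackAlloc(1));  // 16 (12 was rounded up to 16)
    console.log(STACKTOP);       // 24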

View file

@ -78,15 +78,12 @@ var RELOOPER_BUFFER_SIZE = 20*1024*1024; // The internal relooper buffer size. I
// on OutputBuffer.
var USE_TYPED_ARRAYS = 2; // Use typed arrays for the heap. See https://github.com/kripken/emscripten/wiki/Code-Generation-Modes/
// 0 means no typed arrays are used. This mode disallows LLVM optimizations
// 1 has two heaps, IHEAP (int32) and FHEAP (double),
// and addresses there are a match for normal addresses. This mode disallows LLVM optimizations.
// 2 is a single heap, accessible through views as int8, int32, etc. This is
// the recommended mode both for performance and for compatibility.
// the only supported mode.
var USE_FHEAP = 1; // Relevant in USE_TYPED_ARRAYS == 1. If this is disabled, only IHEAP will be used, and FHEAP
// not generated at all. This is useful if your code is 100% ints without floats or doubles
var DOUBLE_MODE = 1; // How to load and store 64-bit doubles. Without typed arrays or in typed array mode 1,
// this doesn't matter - these values are just values like any other. In typed array mode 2,
var DOUBLE_MODE = 1; // How to load and store 64-bit doubles.
// A potential risk is that doubles may be only 32-bit aligned. Forcing 64-bit alignment
// a potential risk is that doubles may be only 32-bit aligned. Forcing 64-bit alignment
// in Clang itself should be able to solve that, or as a workaround in DOUBLE_MODE 1 we
// will carefully load in parts, in a way that requires only 32-bit alignment. In DOUBLE_MODE
@ -97,9 +94,8 @@ var DOUBLE_MODE = 1; // How to load and store 64-bit doubles. Without typed arra
// then load it aligned, and that load-store will make JS engines alter it if it is being
// stored to a typed array for security reasons. That will 'fix' the number from being a
// NaN or an infinite number.
var UNALIGNED_MEMORY = 0; // If enabled, all memory accesses are assumed to be unaligned. (This only matters in
// typed arrays mode 2 where alignment is relevant.) In unaligned memory mode, you
// can run nonportable code that typically would break in JS (or on ARM for that
var UNALIGNED_MEMORY = 0; // If enabled, all memory accesses are assumed to be unaligned. In unaligned memory mode,
// you can run nonportable code that typically would break in JS (or on ARM for that
// matter, which also cannot do unaligned reads/writes), at the cost of slowness
var FORCE_ALIGNED_MEMORY = 0; // If enabled, assumes all reads and writes are fully aligned for the type they
// use. This is true in proper C code (no undefined behavior), but is sadly
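
The DOUBLE_MODE text above refers to loading a double that may be only 32-bit aligned by copying its two 32-bit halves into a scratch slot that is known to be 8-byte aligned, then reading the scratch slot as a double. A hedged sketch of that idea; SCRATCH_PTR and loadDoubleUnaligned are illustrative names:

    // Load a double from an address that may only be 4-byte aligned, using two
    // aligned 32-bit reads plus an 8-byte-aligned scratch slot.
    var buffer = new ArrayBuffer(64);
    var HEAP32 = new Int32Array(buffer);
    var HEAPF64 = new Float64Array(buffer);

    var SCRATCH_PTR = 48; // 8-byte-aligned scratch area inside the heap

    function loadDoubleUnaligned(ptr) { // ptr is 4-byte aligned, maybe not 8
      HEAP32[SCRATCH_PTR >> 2] = HEAP32[ptr >> 2];
      HEAP32[(SCRATCH_PTR + 4) >> 2] = HEAP32[(ptr + 4) >> 2];
      return HEAPF64[SCRATCH_PTR >> 3];
    }

    // Place a double at a 4-byte-aligned (not 8-byte-aligned) offset, then
    // read it back through the two-halves path.
    HEAPF64[1] = 2.25;                   // bytes 8..15
    HEAP32[1] = HEAP32[2];               // move low word to bytes 4..7
    HEAP32[2] = HEAP32[3];               // move high word to bytes 8..11
    console.log(loadDoubleUnaligned(4)); // 2.25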

View file

@ -40,8 +40,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output, force_c=True)
def test_i64(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
src = '''
#include <stdio.h>
int main()
@ -267,48 +265,36 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run(src, '*1*\n*0*\n*0*\n')
def test_i64_b(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_b')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_cmp(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_cmp')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_cmp2(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_cmp2')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_double(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_double')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_umul(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_umul')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_precise(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
@ -382,7 +368,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
''', 'c = 4ca38a6bd2973f97')
def test_i64_llabs(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
Settings.PRECISE_I64_MATH = 2
test_path = path_from_root('tests', 'core', 'test_i64_llabs')
@ -391,40 +376,30 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output)
def test_i64_zextneg(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_zextneg')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_7z(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_7z')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output, ['hallo'])
def test_i64_i16(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_i16')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_qdouble(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_qdouble')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_i64_varargs(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
test_path = path_from_root('tests', 'core', 'test_i64_varargs')
src, output = (test_path + s for s in ('.in', '.out'))
@ -474,8 +449,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output)
def test_double_i64_conversion(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
test_path = path_from_root('tests', 'core', 'test_double_i64_conversion')
src, output = (test_path + s for s in ('.in', '.out'))
@ -516,8 +489,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output)
def test_bswap64(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
test_path = path_from_root('tests', 'core', 'test_bswap64')
src, output = (test_path + s for s in ('.in', '.out'))
@ -555,7 +526,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
os.environ['EMSCRIPT_MAX_CHUNK_SIZE'] = chunk_size
# A good test of i64 math
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2 C-style memory aliasing')
self.do_run('', 'Usage: hashstring <seed>',
libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None),
includes=[path_from_root('tests', 'cube2hash')])
@ -610,8 +580,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
# Test for undefined behavior in C. This is not legitimate code, but does exist
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('No meaning to unaligned addresses without t2')
src = r'''
#include <stdio.h>
@ -735,13 +703,8 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
'''
self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')
# Now let's see some code that should just work in USE_TYPED_ARRAYS == 2, but requires
# corrections otherwise
Settings.CHECK_SIGNS = 0
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 0
else:
Settings.CORRECT_SIGNS = 1
Settings.CORRECT_SIGNS = 0
src = '''
#include <stdio.h>
@ -805,7 +768,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output)
def test_closebitcasts(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
test_path = path_from_root('tests', 'core', 'closebitcasts')
src, output = (test_path + s for s in ('.c', '.txt'))
self.do_run_from_file(src, output)
@ -844,8 +806,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output)
def test_math(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
test_path = path_from_root('tests', 'core', 'test_math')
src, output = (test_path + s for s in ('.in', '.out'))
@ -894,8 +854,6 @@ class T(RunnerCore): # Short name, to make it more fun to use manually on the co
self.do_run_from_file(src, output)
def test_llrint(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
test_path = path_from_root('tests', 'core', 'test_llrint')
src, output = (test_path + s for s in ('.in', '.out'))
@ -1915,8 +1873,6 @@ value = real 1.25 imag 0.00''', force_c=True)
self.do_run_from_file(src, output)
def test_alloca(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('non-ta2 may have unaligned allocas')
test_path = path_from_root('tests', 'core', 'test_alloca')
src, output = (test_path + s for s in ('.in', '.out'))
@ -2245,7 +2201,6 @@ def process(filename):
self.do_run_from_file(src, output, post_build=check)
def test_emscripten_get_now(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
self.banned_js_engines = [V8_ENGINE] # timer limitations in v8 shell
if self.run_name == 'slow2asm':
@ -2288,7 +2243,6 @@ def process(filename):
self.do_run_from_file(src, output)
def test_memorygrowth(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('memory growth is only supported with typed arrays mode 2')
self.banned_js_engines = [V8_ENGINE] # stderr printing limitations in v8
self.emcc_args += ['-s', 'ALLOW_MEMORY_GROWTH=0'] # start with 0
@ -2422,8 +2376,6 @@ def process(filename):
self.do_run_from_file(src, output)
def test_indirectbr_many(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('blockaddr > 255 requires ta2')
test_path = path_from_root('tests', 'core', 'test_indirectbr_many')
src, output = (test_path + s for s in ('.in', '.out'))
@ -2474,7 +2426,6 @@ def process(filename):
self.do_run_from_file(src, output)
def test_varargs_byval(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('FIXME: Add support for this')
if self.is_emscripten_abi(): return self.skip('clang cannot compile this code with that target yet')
src = r'''
@ -2664,9 +2615,8 @@ The current type of b is: 9
def test_stdlibs(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.USE_TYPED_ARRAYS == 2:
# Typed arrays = 2 + safe heap prints a warning that messes up our output.
Settings.SAFE_HEAP = 0
# safe heap prints a warning that messes up our output.
Settings.SAFE_HEAP = 0
src = '''
#include <stdio.h>
#include <stdlib.h>
@ -2926,13 +2876,11 @@ The current type of b is: 9
self.do_run_from_file(src, output)
def test_memcpy3(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('need ta2')
test_path = path_from_root('tests', 'core', 'test_memcpy3')
src, output = (test_path + s for s in ('.c', '.out'))
self.do_run_from_file(src, output)
def test_memset(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('need ta2')
test_path = path_from_root('tests', 'core', 'test_memset')
src, output = (test_path + s for s in ('.c', '.out'))
self.do_run_from_file(src, output)
@ -2960,8 +2908,6 @@ The current type of b is: 9
self.do_run_from_file(src, output)
def test_memmove2(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('need ta2')
test_path = path_from_root('tests', 'core', 'test_memmove2')
src, output = (test_path + s for s in ('.in', '.out'))
@ -3219,8 +3165,7 @@ def process(filename):
def test_dlfcn_qsort(self):
if not self.can_dlfcn(): return
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1 # Needed for unsafe optimizations
Settings.CORRECT_SIGNS = 1 # Needed for unsafe optimizations
self.prep_dlfcn_lib()
Settings.EXPORTED_FUNCTIONS = ['_get_cmp']
@ -3306,7 +3251,6 @@ def process(filename):
def test_dlfcn_data_and_fptr(self):
if Settings.ASM_JS: return self.skip('this is not a valid case - libraries should not be able to access their parents globals willy nilly')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
if not self.can_dlfcn(): return
if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize out parent_func')
@ -3403,7 +3347,6 @@ def process(filename):
post_build=self.dlfcn_post_build)
def test_dlfcn_alias(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
if Settings.ASM_JS: return self.skip('this is not a valid case - libraries should not be able to access their parents globals willy nilly')
Settings.LINKABLE = 1
@ -3454,7 +3397,6 @@ def process(filename):
def test_dlfcn_varargs(self):
if Settings.ASM_JS: return self.skip('this is not a valid case - libraries should not be able to access their parents globals willy nilly')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
if not self.can_dlfcn(): return
@ -3511,7 +3453,6 @@ def process(filename):
post_build=self.dlfcn_post_build)
def test_dlfcn_self(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
if os.environ.get('EMCC_FAST_COMPILER') != '0': return self.skip('todo in fastcomp')
Settings.DLOPEN_SUPPORT = 1
@ -3577,7 +3518,6 @@ def process(filename):
self.do_run(src, 'success', force_c=True, post_build=self.dlfcn_post_build)
def test_dlfcn_stacks(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('snprintf needs ta2 to be able to bitcast int<->float')
if not self.can_dlfcn(): return
self.prep_dlfcn_lib()
@ -4058,21 +3998,18 @@ Have even and odd!
self.do_run_from_file(src, output)
def test_parseInt(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
if Settings.QUANTUM_SIZE == 1: return self.skip('Q1 and I64_1 do not mix well yet')
src = open(path_from_root('tests', 'parseInt', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'parseInt', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_transtrcase(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('musl libc needs ta2')
test_path = path_from_root('tests', 'core', 'test_transtrcase')
src, output = (test_path + s for s in ('.in', '.out'))
self.do_run_from_file(src, output)
def test_printf(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
self.banned_js_engines = [NODE_JS, V8_ENGINE] # SpiderMonkey and V8 do different things to float64 typed arrays, un-NaNing, etc.
src = open(path_from_root('tests', 'printf', 'test.c'), 'r').read()
expected = [open(path_from_root('tests', 'printf', 'output.txt'), 'r').read(),
@ -4144,9 +4081,8 @@ Have even and odd!
def test_sscanf_2(self):
# doubles
if Settings.USE_TYPED_ARRAYS == 2:
for ftype in ['float', 'double']:
src = r'''
for ftype in ['float', 'double']:
src = r'''
#include <stdio.h>
int main(){
@ -4185,14 +4121,14 @@ Have even and odd!
return 0;
}
'''
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789063 123456.789063
Pass: 123456.789063 123456.789063
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
else:
self.do_run(src, '''Pass: 1.234568 1.234568
else:
self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
@ -4223,8 +4159,6 @@ Pass: 0.000012 0.000012''')
def test_sscanf_3(self):
if self.run_name.startswith('s_'): return self.skip('This test requires linking to musl lib for sscanf.')
# i64
if not Settings.USE_TYPED_ARRAYS == 2: return self.skip('64-bit sscanf only supported in ta2')
test_path = path_from_root('tests', 'core', 'test_sscanf_3')
src, output = (test_path + s for s in ('.in', '.out'))
@ -4253,7 +4187,6 @@ Pass: 0.000012 0.000012''')
def test_sscanf_skip(self):
if self.run_name.startswith('s_'): return self.skip('This test requires linking to musl lib for sscanf.')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("need ta2 for full i64")
test_path = path_from_root('tests', 'core', 'test_sscanf_skip')
src, output = (test_path + s for s in ('.in', '.out'))
@ -4269,7 +4202,6 @@ Pass: 0.000012 0.000012''')
def test_sscanf_hex(self):
if self.run_name.startswith('s_'): return self.skip('This test requires linking to musl lib for sscanf.')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
test_path = path_from_root('tests', 'core', 'test_sscanf_hex')
src, output = (test_path + s for s in ('.in', '.out'))
@ -4994,7 +4926,6 @@ PORT: 3979
def test_jansson(self):
return self.skip('currently broken')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
if Settings.SAFE_HEAP: return self.skip('jansson is not safe-heap safe')
src = '''
@ -5156,7 +5087,6 @@ int main(void) {
### 'Medium' tests
def test_fannkuch(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('musl libc needs ta2')
if os.environ.get('EMCC_FAST_COMPILER') == '0': return self.skip('needs fastcomp')
results = [ (1,0), (2,1), (3,2), (4,4), (5,7), (6,10), (7, 16), (8,22) ]
for i, j in results:
@ -5165,7 +5095,8 @@ int main(void) {
def test_raytrace(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('Relies on double value rounding, extremely sensitive')
# TODO: Should we remove this test?
return self.skip('Relies on double value rounding, extremely sensitive')
src = open(path_from_root('tests', 'raytrace.cpp'), 'r').read().replace('double', 'float')
output = open(path_from_root('tests', 'raytrace.ppm'), 'r').read()
@ -5316,8 +5247,7 @@ return malloc(size);
Settings.CORRECT_OVERFLOWS = 1
Settings.CHECK_OVERFLOWS = 0
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1
Settings.CORRECT_SIGNS = 1
def test():
self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp')
@ -5354,7 +5284,6 @@ return malloc(size);
def test_simd(self):
if self.is_emterpreter(): return self.skip('todo')
if os.environ.get('EMCC_FAST_COMPILER') == '0': return self.skip('needs fastcomp')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
test_path = path_from_root('tests', 'core', 'test_simd')
src, output = (test_path + s for s in ('.in', '.out'))
@ -5373,8 +5302,6 @@ return malloc(size);
def test_simd3(self):
return self.skip('FIXME: this appears to be broken')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
test_path = path_from_root('tests', 'core', 'test_simd3')
src, output = (test_path + s for s in ('.in', '.out'))
@ -5573,8 +5500,6 @@ def process(filename):
force_c=True)
def test_zlib(self):
if not Settings.USE_TYPED_ARRAYS == 2: return self.skip('works in general, but cached build will be optimized and fail, so disable this')
if self.emcc_args is not None and '-O2' in self.emcc_args and 'ASM_JS=0' not in self.emcc_args: # without asm, closure minifies Math.imul badly
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
@ -5703,11 +5628,7 @@ def process(filename):
Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS) # remove -g, so we have one test without it by default
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1
else:
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["mqc.c:566", "mqc.c:317"]
Settings.CORRECT_SIGNS = 1
post = '''
def process(filename):
@ -5890,9 +5811,6 @@ def process(filename):
else:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
if '_ta2' in shortname and not Settings.USE_TYPED_ARRAYS == 2:
print self.skip('case "%s" only relevant for ta2' % shortname)
continue
if '_noasm' in shortname and Settings.ASM_JS:
print self.skip('case "%s" not relevant for asm.js' % shortname)
continue
@ -5929,8 +5847,6 @@ def process(filename):
self.emcc_args = emcc_args
def test_fuzz(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('tests', 'fuzz', 'include'), '-Wno-warn-absolute-paths']
def run_all(x):
@ -6016,7 +5932,6 @@ def process(filename):
def test_corruption(self):
if Settings.ASM_JS: return self.skip('cannot use corruption checks in asm')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2 for actual test')
Settings.CORRUPTION_CHECK = 1
@ -6045,7 +5960,6 @@ def process(filename):
def test_corruption_2(self):
if Settings.ASM_JS: return self.skip('cannot use corruption checks in asm')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2 for actual test')
Settings.SAFE_HEAP = 1
Settings.CORRUPTION_CHECK = 1
@ -6058,7 +5972,6 @@ def process(filename):
def test_corruption_3(self):
if Settings.ASM_JS: return self.skip('cannot use corruption checks in asm')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2 for actual test')
Settings.CORRUPTION_CHECK = 1
@ -6834,7 +6747,8 @@ def process(filename):
def test_safe_heap(self):
if not Settings.SAFE_HEAP: return self.skip('We need SAFE_HEAP to test SAFE_HEAP')
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('It is ok to violate the load-store assumption with TA2')
# TODO: Should we remove this test?
return self.skip('It is ok to violate the load-store assumption with TA2')
if Building.LLVM_OPTS: return self.skip('LLVM can optimize away the intermediate |x|')
src = '''
@ -6941,7 +6855,6 @@ def process(filename):
def test_source_map(self):
if self.is_emterpreter(): return self.skip('todo')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("doesn't pass without typed arrays")
if NODE_JS not in JS_ENGINES: return self.skip('sourcemapper requires Node to run')
if '-g' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g')
@ -7032,7 +6945,6 @@ def process(filename):
def test_exception_source_map(self):
if self.is_emterpreter(): return self.skip('todo')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("doesn't pass without typed arrays")
if '-g4' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g4')
if NODE_JS not in JS_ENGINES: return self.skip('sourcemapper requires Node to run')
if os.environ.get('EMCC_FAST_COMPILER') == '0': return self.skip('requires fastcomp')
@ -7214,12 +7126,6 @@ def process(filename):
}
'''
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 0
self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-6**5*') # JS floor operations, always to the negative. This is an undetected error here!
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # We get these right, since they are 32-bit and we can shortcut using the |0 trick
self.do_run(src.replace('TYPE', 'unsigned int'), '*-2**2**-6**5*')
Settings.CORRECT_ROUNDINGS = 1
Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*') # Correct
@ -7227,23 +7133,6 @@ def process(filename):
self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*') # Correct
Settings.CORRECT_SIGNS = 0
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 2
Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:13"] # Fix just the last mistake
self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-5**5*')
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Here we are lucky and also get the first one right
self.do_run(src.replace('TYPE', 'unsigned int'), '*-2**2**-5**5*')
# And reverse the check with = 2
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 3
Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:999"]
self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*')
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*')
Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*')
Settings.CORRECT_SIGNS = 0
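
The rounding test above turns on the JS-specific hazard noted in its comments: Math.floor on a quotient rounds toward negative infinity, whereas C integer division truncates toward zero; for 32-bit values the |0 coercion gives the C behaviour. A small sketch of the difference:

    // C-style integer division truncates toward zero; Math.floor rounds toward
    // negative infinity, so naive JS division is wrong for negative operands.
    function divFloor(a, b) { return Math.floor(a / b); } // the "undetected error"
    function divTrunc(a, b) { return (a / b) | 0; }       // |0 truncates (32-bit only)

    console.log(divFloor(-5, 2), divTrunc(-5, 2)); // -3 -2  (C expects -2)
    console.log(divFloor(5, 2), divTrunc(5, 2));   //  2  2  (both agree when positive)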
def test_float_literals(self):
self.do_run_from_file(path_from_root('tests', 'test_float_literals.cpp'), path_from_root('tests', 'test_float_literals.out'))

View file

@ -165,7 +165,7 @@ There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR P
if opt_level == 0 or '-g' in params: assert 'function _main() {' in generated or 'function _main(){' in generated, 'Should be unminified'
elif opt_level >= 2: assert ('function _main(){' in generated or '"use asm";var a=' in generated), 'Should be whitespace-minified'
# emcc -s RELOOP=1 src.cpp ==> should pass -s to emscripten.py. --typed-arrays is a convenient alias for -s USE_TYPED_ARRAYS
# emcc -s RELOOP=1 src.cpp ==> should pass -s to emscripten.py.
for params, test, text in [
(['-O2'], lambda generated: 'function intArrayToString' in generated, 'shell has unminified utilities'),
(['-O2', '--closure', '1'], lambda generated: 'function intArrayToString' not in generated and ';function' in generated, 'closure minifies the shell, removes whitespace'),
@ -185,13 +185,8 @@ There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR P
(['-O2'], lambda generated: 'var b=0' in generated and '"use asm";var a=' in generated and 'function _main' not in generated, 'very minified, no function names'),
#(['-O2', '-g4'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'same as -g3 for now'),
(['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
(['-s', 'USE_TYPED_ARRAYS=0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['-s', 'USE_TYPED_ARRAYS=1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
(['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"]' in generated, 'dump is now exported'),
(['--typed-arrays', '0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['--typed-arrays', '1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
(['--typed-arrays', '2'], lambda generated: 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 selected'),
(['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
([], lambda generated: '// The Module object' in generated, 'without opts, comments in shell code'),
(['-O2'], lambda generated: '// The Module object' not in generated, 'with opts, no comments in shell code'),
@ -200,7 +195,6 @@ There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR P
]:
print params, text
self.clear()
if os.environ.get('EMCC_FAST_COMPILER') != '0' and text in ['disable typed arrays', 'typed arrays 1 selected']: continue
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js'] + params, stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)

View file

@ -155,8 +155,6 @@ class sockets(BrowserCore):
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_inet4(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
src = r'''
#include <stdio.h>
#include <arpa/inet.h>

View file

@ -1574,10 +1574,6 @@ class Building:
def can_build_standalone():
return not Settings.BUILD_AS_SHARED_LIB and not Settings.LINKABLE and not Settings.EXPORT_ALL
@staticmethod
def can_use_unsafe_opts():
return Settings.USE_TYPED_ARRAYS == 2
@staticmethod
def can_inline():
return Settings.INLINING_LIMIT == 0
@ -1603,92 +1599,14 @@ class Building:
llvm-as < /dev/null | opt -std-compile-opts -disable-output -debug-pass=Arguments
'''
assert 0 <= optimization_level <= 3
unsafe = Building.can_use_unsafe_opts()
opts = []
if optimization_level > 0:
if unsafe:
if not Building.can_inline():
opts.append('-disable-inlining')
if not Building.can_build_standalone():
# -O1 does not have -globaldce, which removes stuff that is needed for libraries and linkables
optimization_level = min(1, optimization_level)
opts.append('-O%d' % optimization_level)
#print '[unsafe: %s]' % ','.join(opts)
else:
allow_nonportable = False
optimize_size = True
use_aa = False
# PassManagerBuilder::populateModulePassManager
if allow_nonportable and use_aa: # ammo.js results indicate this can be nonportable
opts.append('-tbaa')
opts.append('-basicaa') # makes fannkuch slow but primes fast
if Building.can_build_standalone():
opts += Building.get_safe_internalize()
opts.append('-globalopt')
opts.append('-ipsccp')
opts.append('-deadargelim')
if allow_nonportable: opts.append('-instcombine')
opts.append('-simplifycfg')
opts.append('-prune-eh')
if Building.can_inline(): opts.append('-inline')
opts.append('-functionattrs')
if optimization_level > 2:
opts.append('-argpromotion')
# XXX Danger: Can turn a memcpy into something that violates the
# load-store consistency hypothesis. See hashnum() in Lua.
# Note: this opt is of great importance for raytrace...
if allow_nonportable: opts.append('-scalarrepl')
if allow_nonportable: opts.append('-early-cse') # ?
opts.append('-simplify-libcalls')
opts.append('-jump-threading')
if allow_nonportable: opts.append('-correlated-propagation') # ?
opts.append('-simplifycfg')
if allow_nonportable: opts.append('-instcombine')
opts.append('-tailcallelim')
opts.append('-simplifycfg')
opts.append('-reassociate')
opts.append('-loop-rotate')
opts.append('-licm')
opts.append('-loop-unswitch') # XXX should depend on optimize_size
if allow_nonportable: opts.append('-instcombine')
if Settings.QUANTUM_SIZE == 4: opts.append('-indvars') # XXX this infinite-loops raytrace on q1 (loop in |new node_t[count]| has 68 hardcoded &not fixed)
if allow_nonportable: opts.append('-loop-idiom') # ?
opts.append('-loop-deletion')
opts.append('-loop-unroll')
##### not in llvm-3.0. but have | #addExtensionsToPM(EP_LoopOptimizerEnd, MPM);| if allow_nonportable: opts.append('-instcombine')
# XXX Danger: Messes up Lua output for unknown reasons
# Note: this opt is of minor importance for raytrace...
if optimization_level > 1 and allow_nonportable: opts.append('-gvn')
opts.append('-memcpyopt') # Danger?
opts.append('-sccp')
if allow_nonportable: opts.append('-instcombine')
opts.append('-jump-threading')
opts.append('-correlated-propagation')
opts.append('-dse')
#addExtensionsToPM(EP_ScalarOptimizerLate, MPM)
opts.append('-adce')
opts.append('-simplifycfg')
if allow_nonportable: opts.append('-instcombine')
opts.append('-strip-dead-prototypes')
if Building.can_build_standalone():
opts.append('-globaldce')
if optimization_level > 1: opts.append('-constmerge')
if not Building.can_inline():
opts.append('-disable-inlining')
if not Building.can_build_standalone():
# -O1 does not have -globaldce, which removes stuff that is needed for libraries and linkables
optimization_level = min(1, optimization_level)
opts.append('-O%d' % optimization_level)
Building.LLVM_OPT_OPTS = opts
return opts