This has been deprecated for a long time and does not work with
fastcomp. Building without fastcomp is no longer supported, so these
code paths were all dead.
This commit is contained in:
Bruce Mitchener 2015-04-07 10:06:43 +07:00
Родитель 72b674e1e0
Коммит b268e4ebfa
13 изменённых файлов: 18 добавлений и 338 удалений

11
emcc
Просмотреть файл

@ -427,7 +427,6 @@ try:
js_libraries = []
bind = False
emrun = False
jcache = False
save_bc = False
memory_init_file = None
use_preload_cache = False
@ -637,8 +636,7 @@ try:
logging.warning('--remove-duplicates is deprecated as it is no longer needed. If you cannot link without it, file a bug with a testcase')
newargs[i] = ''
elif newargs[i] == '--jcache':
logging.warning('jcache is deprecated')
jcache = True
logging.error('jcache is no longer supported')
newargs[i] = ''
elif newargs[i] == '--cache':
check_bad_eq(newargs[i])
@ -922,10 +920,6 @@ try:
logging.error('Compiler settings are incompatible with fastcomp. You can fall back to the older compiler core, although that is not recommended, see http://kripken.github.io/emscripten-site/docs/building_from_source/LLVM-Backend.html')
raise e
if jcache:
logging.warning('jcache is deprecated and not supported in fastcomp (you should not need it anyhow), disabling')
jcache = False
fastcomp_opts = []
if shared.Settings.NO_EXIT_RUNTIME:
pre_fastcomp_opts += ['-emscripten-no-exit-runtime']
@ -1336,7 +1330,6 @@ try:
# Emscripten
logging.debug('LLVM => JS')
extra_args = [] if not js_libraries else ['--libraries', ','.join(map(os.path.abspath, js_libraries))]
if jcache: extra_args.append('--jcache')
final = shared.Building.emscripten(final, append_ext=False, extra_args=extra_args)
if DEBUG: save_intermediate('original')
@ -1455,7 +1448,7 @@ try:
passes += ['minifyWhitespace']
logging.debug('applying js optimization passes: %s', ' '.join(passes))
misc_temp_files.note(final)
final = shared.Building.js_optimizer(final, passes, jcache, debug_level >= 4, js_optimizer_extra_info, just_split=just_split, just_concat=just_concat)
final = shared.Building.js_optimizer(final, passes, debug_level >= 4, js_optimizer_extra_info, just_split=just_split, just_concat=just_concat)
misc_temp_files.note(final)
js_transform_tempfiles.append(final)
if DEBUG: save_intermediate(title, suffix='js' if 'emitJSON' not in passes else 'json')

Просмотреть файл

@ -68,7 +68,7 @@ def process_funcs((i, funcs_file, meta, settings_file, compiler, forwarded_file,
return out
def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler. We parallelize as much as possible
Args:
@ -87,8 +87,6 @@ def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
if DEBUG: logging.debug('emscript: ll=>js')
if jcache: jcache.ensure()
# Pre-scan ll and alter settings as necessary
if DEBUG: t = time.time()
ll = open(infile).read()
@ -159,34 +157,12 @@ def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
pre_file = temp_files.get('.pre.ll').name
pre_input = ''.join(pre) + '\n' + meta
out = None
if jcache:
keys = [pre_input, settings_text, ','.join(libraries)]
shortkey = jcache.get_shortkey(keys)
if DEBUG_CACHE: logging.debug('shortkey', shortkey)
out = jcache.get(shortkey, keys)
if DEBUG_CACHE and not out:
dfpath = os.path.join(get_configuration().TEMP_DIR, "ems_" + shortkey)
dfp = open(dfpath, 'w')
dfp.write(pre_input)
dfp.write("\n\n========================== settings_text\n\n")
dfp.write(settings_text)
dfp.write("\n\n========================== libraries\n\n")
dfp.write("\n".join(libraries))
dfp.close()
logging.debug(' cache miss, key data dumped to %s' % dfpath)
if out and DEBUG: logging.debug(' loading pre from jcache')
if not out:
open(pre_file, 'w').write(pre_input)
#print >> sys.stderr, 'running', str([settings_file, pre_file, 'pre'] + libraries).replace("'/", "'") # see funcs
out = jsrun.run_js(compiler, compiler_engine, [settings_file, pre_file, 'pre'] + libraries, stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'))
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
if jcache:
if DEBUG: logging.debug(' saving pre to jcache')
jcache.set(shortkey, keys, out)
pre, forwarded_data = out.split('//FORWARDED_DATA:')
forwarded_file = temp_files.get('.json').name
pre_input = None
@ -215,32 +191,13 @@ def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
settings['EXPORTED_FUNCTIONS'] = forwarded_json['EXPORTED_FUNCTIONS']
save_settings()
chunks = cache_module.chunkify(
funcs, chunk_size,
jcache.get_cachename('emscript_files') if jcache else None)
chunks = cache_module.chunkify(funcs, chunk_size)
#sys.exit(1)
#chunks = [chunks[0]] # pick specific chunks for debugging/profiling
funcs = None
if jcache:
# load chunks from cache where we can # TODO: ignore small chunks
cached_outputs = []
def load_from_cache(chunk):
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys) # TODO: share shortkeys with later code
out = jcache.get(shortkey, keys) # this is relatively expensive (pickling?)
if out:
cached_outputs.append(out)
return False
return True
chunks = filter(load_from_cache, chunks)
if len(cached_outputs) > 0:
if out and DEBUG: logging.debug(' loading %d funcchunks from jcache' % len(cached_outputs))
else:
cached_outputs = []
# TODO: minimize size of forwarded data from funcs to what we actually need
if len(chunks) > 0:
@ -254,8 +211,7 @@ def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
funcs_file = temp_files.get('.func_%d.ll' % i).name
f = open(funcs_file, 'w')
f.write(chunks[i])
if not jcache:
chunks[i] = None # leave chunks array alive (need its length later)
chunks[i] = None # leave chunks array alive (need its length later)
f.write('\n')
f.write(meta)
f.close()
@ -275,19 +231,8 @@ def emscript(infile, settings, outfile, libraries=[], compiler_engine=None,
else:
outputs = []
if jcache:
# save chunks to cache
for i in range(len(chunks)):
chunk = chunks[i]
keys = [settings_text, forwarded_data, chunk]
shortkey = jcache.get_shortkey(keys)
jcache.set(shortkey, keys, outputs[i])
if out and DEBUG and len(chunks) > 0: logging.debug(' saving %d funcchunks to jcache' % len(chunks))
chunks = None
if jcache: outputs += cached_outputs # TODO: preserve order
outputs = [output.split('//FORWARDED_DATA:') for output in outputs]
for output in outputs:
assert len(output) == 2, 'Did not receive forwarded data in an output - process failed? We only got: ' + output[0][-3000:]
@ -749,7 +694,7 @@ Runtime.getTempRet0 = asm['getTempRet0'];
# if the experiment fails)
def emscript_fast(infile, settings, outfile, libraries=[], compiler_engine=None,
jcache=None, temp_files=None, DEBUG=None, DEBUG_CACHE=None):
temp_files=None, DEBUG=None, DEBUG_CACHE=None):
"""Runs the emscripten LLVM-to-JS compiler.
Args:
@ -1562,7 +1507,7 @@ else:
logging.critical('Non-fastcomp compiler is no longer available, please use fastcomp or an older version of emscripten')
sys.exit(1)
def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG, DEBUG_CACHE):
def main(args, compiler_engine, cache, relooper, temp_files, DEBUG, DEBUG_CACHE):
# Prepare settings for serialization to JSON.
settings = {}
for setting in args.settings:
@ -1591,7 +1536,7 @@ def main(args, compiler_engine, cache, jcache, relooper, temp_files, DEBUG, DEBU
if DEBUG: logging.debug(' emscript: bootstrapping struct info complete')
emscript(args.infile, settings, args.outfile, libraries, compiler_engine=compiler_engine,
jcache=jcache, temp_files=temp_files, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)
temp_files=temp_files, DEBUG=DEBUG, DEBUG_CACHE=DEBUG_CACHE)
def _main(environ):
response_file = True
@ -1635,10 +1580,6 @@ def _main(environ):
metavar='FOO=BAR',
help=('Overrides for settings defined in settings.js. '
'May occur multiple times.'))
parser.add_option('-j', '--jcache',
action='store_true',
default=False,
help=('Enable jcache (ccache-like caching of compilation results, for faster incremental builds).'))
parser.add_option('-T', '--temp-dir',
default=None,
help=('Where to create temporary files.'))
@ -1700,7 +1641,6 @@ WARNING: You should normally never use this! Use emcc instead.
keywords,
compiler_engine=keywords.compiler,
cache=cache,
jcache=cache_module.JCache(cache) if keywords.jcache else None,
relooper=relooper,
temp_files=temp_files,
DEBUG=DEBUG,

Просмотреть файл

@ -432,8 +432,7 @@ Options that are modified or new in *emcc* are listed below:
the caching mechanism can get confused. Clearing the cache can fix
weird problems related to cache incompatibilities, like *Clang*
failing to link with library files. This also clears other cached
data like the jcache and the bootstrapped relooper. After the cache
is cleared, this process will exit.
data. After the cache is cleared, this process will exit.
"--save-bc PATH"
When compiling to JavaScript or HTML, this option will save a copy

Просмотреть файл

@ -236,7 +236,6 @@ def get_mapped_items():
mapped_wiki_inline_code['EmscriptenBatteryEvent']=':c:type:`EmscriptenBatteryEvent`'
mapped_wiki_inline_code['emscripten_async_prepare()']=':c:func:`emscripten_async_prepare`'
mapped_wiki_inline_code['EmscriptenFocusEvent']=':c:type:`EmscriptenFocusEvent`'
mapped_wiki_inline_code['emscripten_jcache_printf']=':c:func:`emscripten_jcache_printf`'
mapped_wiki_inline_code['emscripten_async_prepare_data()']=':c:func:`emscripten_async_prepare_data`'
mapped_wiki_inline_code['removeRunDependency']=':js:func:`removeRunDependency`'
mapped_wiki_inline_code['EMSCRIPTEN_EVENT_DEVICEORIENTATION']=':c:macro:`EMSCRIPTEN_EVENT_DEVICEORIENTATION`'
@ -255,7 +254,6 @@ def get_mapped_items():
mapped_wiki_inline_code['EMSCRIPTEN_RESULT_INVALID_PARAM']=':c:macro:`EMSCRIPTEN_RESULT_INVALID_PARAM`'
mapped_wiki_inline_code['cwrap()']=':js:func:`cwrap`'
mapped_wiki_inline_code['emscripten_vibrate()']=':c:func:`emscripten_vibrate`'
mapped_wiki_inline_code['emscripten_jcache_printf()']=':c:func:`emscripten_jcache_printf`'
mapped_wiki_inline_code['emscripten_get_fullscreen_status()']=':c:func:`emscripten_get_fullscreen_status`'
mapped_wiki_inline_code['emscripten_run_script_int']=':c:func:`emscripten_run_script_int`'
mapped_wiki_inline_code['EMSCRIPTEN_NETWORK_WEBRTC']=':c:macro:`EMSCRIPTEN_NETWORK_WEBRTC`'

Просмотреть файл

@ -369,7 +369,7 @@ Options that are modified or new in *emcc* are listed below:
``--clear-cache``
Manually clears the cache of compiled Emscripten system libraries (libc++, libc++abi, libc).
This is normally handled automatically, but if you update LLVM in-place (instead of having a different directory for a new version), the caching mechanism can get confused. Clearing the cache can fix weird problems related to cache incompatibilities, like *Clang* failing to link with library files. This also clears other cached data like the jcache and the bootstrapped relooper. After the cache is cleared, this process will exit.
This is normally handled automatically, but if you update LLVM in-place (instead of having a different directory for a new version), the caching mechanism can get confused. Clearing the cache can fix weird problems related to cache incompatibilities, like *Clang* failing to link with library files. This also clears other cached data. After the cache is cleared, this process will exit.
.. _emcc-clear-ports:

Просмотреть файл

@ -7885,25 +7885,6 @@ LibraryManager.library = {
return Math.random();
},
emscripten_jcache_printf___deps: ['_formatString'],
emscripten_jcache_printf_: function(varargs) {
// XXX this is probably broken
var MAX = 10240;
if (!_emscripten_jcache_printf_.buffer) {
_emscripten_jcache_printf_.buffer = _malloc(MAX);
}
var i = 0;
do {
var curr = {{{ makeGetValue('varargs', '0', 'i8') }}};
varargs += {{{ STACK_ALIGN }}};
{{{ makeSetValue('_emscripten_jcache_printf_.buffer', 'i', 'curr', 'i8') }}};
i++;
assert(i*{{{ STACK_ALIGN }}} < MAX);
} while (curr != 0);
Module.print(intArrayToString(__formatString(_emscripten_jcache_printf_.buffer, varargs)).replace('\\n', ''));
Runtime.stackAlloc(-4*i); // free up the stack space we know is ok to free
},
emscripten_asm_const: function(code) {
Runtime.asmConsts[code]();
},

Просмотреть файл

@ -240,30 +240,6 @@ int emscripten_get_callstack(int flags, char *out, int maxbytes);
/* Internal APIs. Be careful with these. */
/* ===================================== */
/*
* jcache-friendly printf. printf in general will receive a string
* literal, which becomes a global constant, which invalidates all
* jcache entries. emscripten_jcache_printf is parsed before
* clang into something without any string literals, so you can
* add such printouts to your code and only the (chunk containing
* the) function you modify will be invalided and recompiled.
*
* Note in particular that you need to already have a call to this
* function in your code *before* you add one and do an incremental
* build, so that adding an external reference does not invalidate
* everything.
*
* This function assumes the first argument is a string literal
* (otherwise you don't need it), and the other arguments, if any,
* are neither strings nor complex expressions (but just simple
* variables). (You can create a variable to store a complex
* expression on the previous line, if necessary.)
*/
#ifdef __cplusplus
void emscripten_jcache_printf(const char *format, ...);
void emscripten_jcache_printf_(...); /* internal use */
#endif
/* Helper API for EM_ASM - do not call this yourself */
void emscripten_asm_const(const char *code);
int emscripten_asm_const_int(const char *code, ...);

Просмотреть файл

@ -1,9 +0,0 @@
#include <iostream>
int main()
{
std::cout << "hello, world!" << std::endl;
std::cout << "hello, world!" << std::endl;
return 0;
}

Просмотреть файл

@ -1,10 +0,0 @@
#include <iostream>
#include <emscripten.h>
int main()
{
std::cout << "hello, world!" << std::endl;
emscripten_jcache_printf("waka %d waka\n", 5);
return 0;
}

Просмотреть файл

@ -1,11 +0,0 @@
#include <iostream>
#include <emscripten.h>
int main()
{
std::cout << "hello, world!" << std::endl;
emscripten_jcache_printf("waka %d waka\n", 5);
emscripten_jcache_printf("yet another printf %.2f %d\n", 5.5, 66);
return 0;
}

Просмотреть файл

@ -1,4 +1,4 @@
import os.path, sys, shutil, hashlib, cPickle, zlib, time
import os.path, sys, shutil, time
import tempfiles
@ -38,123 +38,10 @@ class Cache:
shutil.copyfile(temp, cachename)
return cachename
# JS-specific cache. We cache the results of compilation and optimization,
# so that in incremental builds we can just load from cache.
# We cache reasonably-large-sized chunks
class JCache:
def __init__(self, cache):
self.cache = cache
self.dirname = os.path.join(cache.dirname, 'jcache')
self.debug = cache.debug
def ensure(self):
self.cache.ensure()
shared.safe_ensure_dirs(self.dirname)
def get_shortkey(self, keys):
if type(keys) not in [list, tuple]:
keys = [keys]
ret = ''
for key in keys:
assert type(key) == str
ret += hashlib.md5(key).hexdigest()
return ret
def get_cachename(self, shortkey):
return os.path.join(self.dirname, shortkey)
# Returns a cached value, if it exists. Make sure the full key matches
def get(self, shortkey, keys):
if self.debug: print >> sys.stderr, 'jcache get?', shortkey
cachename = self.get_cachename(shortkey)
if not os.path.exists(cachename):
if self.debug: print >> sys.stderr, 'jcache none at all'
return
try:
data = cPickle.loads(zlib.decompress(open(cachename).read()))
except Exception, e:
if self.debug: print >> sys.stderr, 'jcache decompress/unpickle error:', e
return
if len(data) != 2:
if self.debug: print >> sys.stderr, 'jcache error in get'
return
oldkeys = data[0]
if len(oldkeys) != len(keys):
if self.debug: print >> sys.stderr, 'jcache collision (a)'
return
for i in range(len(oldkeys)):
if oldkeys[i] != keys[i]:
if self.debug: print >> sys.stderr, 'jcache collision (b)'
return
if self.debug: print >> sys.stderr, 'jcache win'
return data[1]
# Sets the cached value for a key (from get_key)
def set(self, shortkey, keys, value):
cachename = self.get_cachename(shortkey)
try:
f = open(cachename, 'w')
f.write(zlib.compress(cPickle.dumps([keys, value])))
f.close()
except Exception, e:
if self.debug: print >> sys.stderr, 'jcache compress/pickle error:', e
return
# for i in range(len(keys)):
# open(cachename + '.key' + str(i), 'w').write(keys[i])
# open(cachename + '.value', 'w').write(value)
# Given a set of functions of form (ident, text), and a preferred chunk size,
# generates a set of chunks for parallel processing and caching.
# It is very important to generate similar chunks in incremental builds, in
# order to maximize the chance of cache hits. To achieve that, we save the
# chunking used in the previous compilation of this phase, and we try to
# generate the same chunks, barring big differences in function sizes that
# violate our chunk size guideline. If caching is not used, chunking_file
# should be None
def chunkify(funcs, chunk_size, chunking_file, DEBUG=False):
previous_mapping = None
if chunking_file:
chunking_file = chunking_file
if os.path.exists(chunking_file):
try:
previous_mapping = cPickle.Unpickler(open(chunking_file, 'rb')).load() # maps a function identifier to the chunk number it will be in
#if DEBUG: print >> sys.stderr, 'jscache previous mapping', previous_mapping
except:
pass
def chunkify(funcs, chunk_size, DEBUG=False):
chunks = []
if previous_mapping:
# initialize with previous chunking
news = []
for func in funcs:
ident, data = func
assert ident, 'need names for jcache chunking'
if not ident in previous_mapping:
news.append(func)
else:
n = previous_mapping[ident]
while n >= len(chunks): chunks.append([])
chunks[n].append(func)
if DEBUG: print >> sys.stderr, 'jscache not in previous chunking', len(news)
# add news and adjust for new sizes
spilled = news
for i in range(len(chunks)):
chunk = chunks[i]
size = sum([len(func[1]) for func in chunk])
#if DEBUG: print >> sys.stderr, 'need spilling?', i, size, len(chunk), 'vs', chunk_size, 1.5*chunk_size
while size > 1.5*chunk_size and len(chunk) > 1:
spill = chunk.pop()
spilled.append(spill)
size -= len(spill[1])
#if DEBUG: print >> sys.stderr, 'jscache new + spilled', len(spilled)
for chunk in chunks:
size = sum([len(func[1]) for func in chunk])
while size < 0.66*chunk_size and len(spilled) > 0:
spill = spilled.pop()
chunk.append(spill)
size += len(spill[1])
chunks = filter(lambda chunk: len(chunk) > 0, chunks) # might have empty ones, eliminate them
funcs = spilled # we will allocate these into chunks as if they were normal inputs
#if DEBUG: print >> sys.stderr, 'leftover spills', len(spilled)
# initialize reasonably, the rest of the funcs we need to split out
curr = []
total_size = 0
@ -171,28 +58,6 @@ def chunkify(funcs, chunk_size, chunking_file, DEBUG=False):
if curr:
chunks.append(curr)
curr = None
if chunking_file:
# sort within each chunk, to keep the order identical
for chunk in chunks:
chunk.sort(key=lambda func: func[0])
# save new mapping info
new_mapping = {}
for i in range(len(chunks)):
chunk = chunks[i]
for ident, data in chunk:
assert ident not in new_mapping, 'cannot have duplicate names in jcache chunking'
new_mapping[ident] = i
cPickle.Pickler(open(chunking_file, 'wb')).dump(new_mapping)
#if DEBUG:
# for i in range(len(chunks)):
# chunk = chunks[i]
# print >> sys.stderr, 'final chunk', i, len(chunk)
# print >> sys.stderr, 'new mapping:', new_mapping
# if previous_mapping:
# for ident in set(previous_mapping.keys() + new_mapping.keys()):
# if previous_mapping.get(ident) != new_mapping.get(ident):
# print >> sys.stderr, 'mapping inconsistency', ident, previous_mapping.get(ident), new_mapping.get(ident)
return [''.join([func[1] for func in chunk]) for chunk in chunks] # remove function names
import shared

Просмотреть файл

@ -280,10 +280,7 @@ def run_on_chunk(command):
# avoid throwing keyboard interrupts from a child process
raise Exception()
def run_on_js(filename, passes, js_engine, jcache, source_map=False, extra_info=None, just_split=False, just_concat=False):
if isinstance(jcache, bool) and jcache: jcache = shared.JCache
if jcache: shared.JCache.ensure()
def run_on_js(filename, passes, js_engine, source_map=False, extra_info=None, just_split=False, just_concat=False):
if type(passes) == str:
passes = [passes]
@ -322,13 +319,6 @@ def run_on_js(filename, passes, js_engine, jcache, source_map=False, extra_info=
if cleanup:
passes = filter(lambda p: p != 'cleanup', passes) # we will do it manually
if not know_generated and jcache:
# JCache cannot be used without metadata, since it might reorder stuff, and that's dangerous since only generated can be reordered
# This means jcache does not work after closure compiler runs, for example. But you won't get much benefit from jcache with closure
# anyhow (since closure is likely the longest part of the build).
if DEBUG: print >>sys.stderr, 'js optimizer: no metadata, so disabling jcache'
jcache = False
if know_generated:
if not minify_globals:
pre = js[:start_funcs + len(start_funcs_marker)]
@ -393,7 +383,7 @@ EMSCRIPTEN_FUNCS();
if not just_split:
intended_num_chunks = int(round(cores * NUM_CHUNKS_PER_CORE))
chunk_size = min(MAX_CHUNK_SIZE, max(MIN_CHUNK_SIZE, total_size / intended_num_chunks))
chunks = shared.chunkify(funcs, chunk_size, jcache.get_cachename('jsopt') if jcache else None)
chunks = shared.chunkify(funcs, chunk_size)
else:
# keep same chunks as before
chunks = map(lambda f: f[1], funcs)
@ -402,23 +392,6 @@ EMSCRIPTEN_FUNCS();
if DEBUG and len(chunks) > 0: print >> sys.stderr, 'chunkification: num funcs:', len(funcs), 'actual num chunks:', len(chunks), 'chunk size range:', max(map(len, chunks)), '-', min(map(len, chunks))
funcs = None
if jcache:
# load chunks from cache where we can # TODO: ignore small chunks
cached_outputs = []
def load_from_cache(chunk):
keys = [chunk]
shortkey = shared.JCache.get_shortkey(keys) # TODO: share shortkeys with later code
out = shared.JCache.get(shortkey, keys)
if out:
cached_outputs.append(out)
return False
return True
chunks = filter(load_from_cache, chunks)
if len(cached_outputs) > 0:
if DEBUG: print >> sys.stderr, ' loading %d jsfuncchunks from jcache' % len(cached_outputs)
else:
cached_outputs = []
if len(chunks) > 0:
def write_chunk(chunk, i):
temp_file = temp_files.get('.jsfunc_%d.js' % i).name
@ -531,33 +504,19 @@ EMSCRIPTEN_FUNCS();
# just concat the outputs
for out_file in filenames:
f.write(open(out_file).read())
assert not jcache
f.write('\n')
if jcache:
for cached in cached_outputs:
f.write(cached); # TODO: preserve order
f.write('\n')
f.write(post);
# No need to write suffix: if there was one, it is inside post which exists when suffix is there
f.write('\n')
f.close()
if jcache:
# save chunks to cache
for i in range(len(chunks)):
chunk = chunks[i]
keys = [chunk]
shortkey = shared.JCache.get_shortkey(keys)
shared.JCache.set(shortkey, keys, open(filenames[i]).read())
if DEBUG and len(chunks) > 0: print >> sys.stderr, ' saving %d jsfuncchunks to jcache' % len(chunks)
return filename
def run(filename, passes, js_engine=shared.NODE_JS, jcache=False, source_map=False, extra_info=None, just_split=False, just_concat=False):
def run(filename, passes, js_engine=shared.NODE_JS, source_map=False, extra_info=None, just_split=False, just_concat=False):
if 'receiveJSON' in passes: just_split = True
if 'emitJSON' in passes: just_concat = True
js_engine = shared.listify(js_engine)
return temp_files.run_and_clean(lambda: run_on_js(filename, passes, js_engine, jcache, source_map, extra_info, just_split, just_concat))
return temp_files.run_and_clean(lambda: run_on_js(filename, passes, js_engine, source_map, extra_info, just_split, just_concat))
if __name__ == '__main__':
last = sys.argv[-1]

Просмотреть файл

@ -1693,8 +1693,8 @@ class Building:
return opts
@staticmethod
def js_optimizer(filename, passes, jcache=False, debug=False, extra_info=None, output_filename=None, just_split=False, just_concat=False):
ret = js_optimizer.run(filename, passes, NODE_JS, jcache, debug, extra_info, just_split, just_concat)
def js_optimizer(filename, passes, debug=False, extra_info=None, output_filename=None, just_split=False, just_concat=False):
ret = js_optimizer.run(filename, passes, NODE_JS, debug, extra_info, just_split, just_concat)
if output_filename:
safe_move(ret, output_filename)
ret = output_filename
@ -1828,7 +1828,6 @@ class Building:
# compatibility with existing emcc, etc. scripts
Cache = cache.Cache(debug=DEBUG_CACHE)
JCache = cache.JCache(Cache)
chunkify = cache.chunkify
def reconfigure_cache():