| var BASIS = (() => { |
| var _scriptName = typeof document != 'undefined' ? document.currentScript?.src : undefined; |
| return ( |
| async function(moduleArg = {}) { |
| var moduleRtn; |
| |
| // include: shell.js |
| // The Module object: Our interface to the outside world. We import |
| // and export values on it. There are various ways Module can be used: |
| // 1. Not defined. We create it here |
| // 2. A function parameter, function(moduleArg) => Promise<Module> |
| // 3. pre-run appended it, var Module = {}; ..generated code.. |
| // 4. External script tag defines var Module. |
| // We need to check if Module already exists (e.g. case 3 above). |
// The substitution below is replaced with actual code at a later stage of the build;
// this way Closure Compiler will not mangle it (e.g. case 4 above).
| // Note that if you want to run closure, and also to use Module |
| // after the generated code, you will need to define var Module = {}; |
| // before the code. Then that object will be used in the code, and you |
| // can continue to use Module afterwards as well. |
| var Module = moduleArg; |
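// Illustrative usage note (not part of the generated runtime): this factory is
// exported as BASIS and resolves once the wasm module is ready, e.g.
//   const basisModule = await BASIS({ /* optional overrides such as locateFile, preRun */ });
// Every property of moduleArg is visible to the generated code as Module['...'].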
| |
| // Determine the runtime environment we are in. You can customize this by |
| // setting the ENVIRONMENT setting at compile time (see settings.js). |
| |
| // Attempt to auto-detect the environment |
| var ENVIRONMENT_IS_WEB = typeof window == 'object'; |
| var ENVIRONMENT_IS_WORKER = typeof WorkerGlobalScope != 'undefined'; |
// N.B. The Electron.js environment is simultaneously a Node environment and
// a web environment.
| var ENVIRONMENT_IS_NODE = typeof process == 'object' && process.versions?.node && process.type != 'renderer'; |
| var ENVIRONMENT_IS_SHELL = !ENVIRONMENT_IS_WEB && !ENVIRONMENT_IS_NODE && !ENVIRONMENT_IS_WORKER; |
| |
| // --pre-jses are emitted after the Module integration code, so that they can |
| // refer to Module (if they choose; they can also define Module) |
| |
| |
| var arguments_ = []; |
| var thisProgram = './this.program'; |
| var quit_ = (status, toThrow) => { |
| throw toThrow; |
| }; |
| |
| if (typeof __filename != 'undefined') { // Node |
| _scriptName = __filename; |
| } else |
| if (ENVIRONMENT_IS_WORKER) { |
| _scriptName = self.location.href; |
| } |
| |
| // `/` should be present at the end if `scriptDirectory` is not empty |
| var scriptDirectory = ''; |
| function locateFile(path) { |
| if (Module['locateFile']) { |
| return Module['locateFile'](path, scriptDirectory); |
| } |
| return scriptDirectory + path; |
| } |
| |
| // Hooks that are implemented differently in different runtime environments. |
| var readAsync, readBinary; |
| |
| if (ENVIRONMENT_IS_NODE) { |
| |
| // These modules will usually be used on Node.js. Load them eagerly to avoid |
| // the complexity of lazy-loading. |
| var fs = require('fs'); |
| |
| scriptDirectory = __dirname + '/'; |
| |
| // include: node_shell_read.js |
| readBinary = (filename) => { |
| // We need to re-wrap `file://` strings to URLs. |
| filename = isFileURI(filename) ? new URL(filename) : filename; |
| var ret = fs.readFileSync(filename); |
| return ret; |
| }; |
| |
| readAsync = async (filename, binary = true) => { |
| // See the comment in the `readBinary` function. |
| filename = isFileURI(filename) ? new URL(filename) : filename; |
| var ret = fs.readFileSync(filename, binary ? undefined : 'utf8'); |
| return ret; |
| }; |
| // end include: node_shell_read.js |
| if (process.argv.length > 1) { |
| thisProgram = process.argv[1].replace(/\\/g, '/'); |
| } |
| |
| arguments_ = process.argv.slice(2); |
| |
| quit_ = (status, toThrow) => { |
| process.exitCode = status; |
| throw toThrow; |
| }; |
| |
| } else |
| |
| // Note that this includes Node.js workers when relevant (pthreads is enabled). |
| // Node.js workers are detected as a combination of ENVIRONMENT_IS_WORKER and |
| // ENVIRONMENT_IS_NODE. |
| if (ENVIRONMENT_IS_WEB || ENVIRONMENT_IS_WORKER) { |
| try { |
| scriptDirectory = new URL('.', _scriptName).href; // includes trailing slash |
| } catch { |
| // Must be a `blob:` or `data:` URL (e.g. `blob:http://site.com/etc/etc`), we cannot |
| // infer anything from them. |
| } |
| |
| { |
| // include: web_or_worker_shell_read.js |
| if (ENVIRONMENT_IS_WORKER) { |
| readBinary = (url) => { |
| var xhr = new XMLHttpRequest(); |
| xhr.open('GET', url, false); |
| xhr.responseType = 'arraybuffer'; |
| xhr.send(null); |
| return new Uint8Array(/** @type{!ArrayBuffer} */(xhr.response)); |
| }; |
| } |
| |
| readAsync = async (url) => { |
| // Fetch has some additional restrictions over XHR, like it can't be used on a file:// url. |
| // See https://github.com/github/fetch/pull/92#issuecomment-140665932 |
| // Cordova or Electron apps are typically loaded from a file:// url. |
// So use XHR in a webview when the URL is a file URL.
| if (isFileURI(url)) { |
| return new Promise((resolve, reject) => { |
| var xhr = new XMLHttpRequest(); |
| xhr.open('GET', url, true); |
| xhr.responseType = 'arraybuffer'; |
| xhr.onload = () => { |
| if (xhr.status == 200 || (xhr.status == 0 && xhr.response)) { // file URLs can return 0 |
| resolve(xhr.response); |
| return; |
| } |
| reject(xhr.status); |
| }; |
| xhr.onerror = reject; |
| xhr.send(null); |
| }); |
| } |
| var response = await fetch(url, { credentials: 'same-origin' }); |
| if (response.ok) { |
| return response.arrayBuffer(); |
| } |
| throw new Error(response.status + ' : ' + response.url); |
| }; |
| // end include: web_or_worker_shell_read.js |
| } |
| } else |
| { |
| } |
| |
| var out = console.log.bind(console); |
| var err = console.error.bind(console); |
| |
| // end include: shell.js |
| |
| // include: preamble.js |
| // === Preamble library stuff === |
| |
| // Documentation for the public APIs defined in this file must be updated in: |
| // site/source/docs/api_reference/preamble.js.rst |
| // A prebuilt local version of the documentation is available at: |
| // site/build/text/docs/api_reference/preamble.js.txt |
| // You can also build docs locally as HTML or other formats in site/ |
| // An online HTML version (which may be of a different version of Emscripten) |
| // is up at http://kripken.github.io/emscripten-site/docs/api_reference/preamble.js.html |
| |
| var wasmBinary; |
| |
| // Wasm globals |
| |
| //======================================== |
| // Runtime essentials |
| //======================================== |
| |
| // whether we are quitting the application. no code should run after this. |
| // set in exit() and abort() |
| var ABORT = false; |
| |
| // set by exit() and abort(). Passed to 'onExit' handler. |
// NOTE: This is also used as the process return code in shell environments
| // but only when noExitRuntime is false. |
| var EXITSTATUS; |
| |
| // In STRICT mode, we only define assert() when ASSERTIONS is set. i.e. we |
| // don't define it at all in release modes. This matches the behaviour of |
| // MINIMAL_RUNTIME. |
| // TODO(sbc): Make this the default even without STRICT enabled. |
| /** @type {function(*, string=)} */ |
| function assert(condition, text) { |
| if (!condition) { |
| // This build was created without ASSERTIONS defined. `assert()` should not |
| // ever be called in this configuration but in case there are callers in |
| // the wild leave this simple abort() implementation here for now. |
| abort(text); |
| } |
| } |
| |
| /** |
| * Indicates whether filename is delivered via file protocol (as opposed to http/https) |
| * @noinline |
| */ |
| var isFileURI = (filename) => filename.startsWith('file://'); |
| |
| // include: runtime_common.js |
| // include: runtime_stack_check.js |
| // end include: runtime_stack_check.js |
| // include: runtime_exceptions.js |
| // end include: runtime_exceptions.js |
| // include: runtime_debug.js |
| // end include: runtime_debug.js |
| var readyPromiseResolve, readyPromiseReject; |
| |
| // Memory management |
| |
| var wasmMemory; |
| |
| var |
| /** @type {!Int8Array} */ |
| HEAP8, |
| /** @type {!Uint8Array} */ |
| HEAPU8, |
| /** @type {!Int16Array} */ |
| HEAP16, |
| /** @type {!Uint16Array} */ |
| HEAPU16, |
| /** @type {!Int32Array} */ |
| HEAP32, |
| /** @type {!Uint32Array} */ |
| HEAPU32, |
| /** @type {!Float32Array} */ |
| HEAPF32, |
| /** @type {!Float64Array} */ |
| HEAPF64; |
| |
| // BigInt64Array type is not correctly defined in closure |
| var |
| /** not-@type {!BigInt64Array} */ |
| HEAP64, |
// BigUint64Array type is not correctly defined in closure
/** not-@type {!BigUint64Array} */
| HEAPU64; |
| |
| var runtimeInitialized = false; |
| |
| |
| |
| function updateMemoryViews() { |
| var b = wasmMemory.buffer; |
| Module['HEAP8'] = HEAP8 = new Int8Array(b); |
| HEAP16 = new Int16Array(b); |
| HEAPU8 = new Uint8Array(b); |
| HEAPU16 = new Uint16Array(b); |
| HEAP32 = new Int32Array(b); |
| HEAPU32 = new Uint32Array(b); |
| HEAPF32 = new Float32Array(b); |
| HEAPF64 = new Float64Array(b); |
| HEAP64 = new BigInt64Array(b); |
| HEAPU64 = new BigUint64Array(b); |
| } |
| |
| // include: memoryprofiler.js |
| // end include: memoryprofiler.js |
| // end include: runtime_common.js |
| function preRun() { |
| if (Module['preRun']) { |
| if (typeof Module['preRun'] == 'function') Module['preRun'] = [Module['preRun']]; |
| while (Module['preRun'].length) { |
| addOnPreRun(Module['preRun'].shift()); |
| } |
| } |
| // Begin ATPRERUNS hooks |
| callRuntimeCallbacks(onPreRuns); |
| // End ATPRERUNS hooks |
| } |
| |
| function initRuntime() { |
| runtimeInitialized = true; |
| |
| // Begin ATINITS hooks |
| if (!Module['noFSInit'] && !FS.initialized) FS.init(); |
| TTY.init(); |
| // End ATINITS hooks |
| |
| wasmExports['__wasm_call_ctors'](); |
| |
| // Begin ATPOSTCTORS hooks |
| FS.ignorePermissions = false; |
| // End ATPOSTCTORS hooks |
| } |
| |
| function postRun() { |
| // PThreads reuse the runtime from the main thread. |
| |
| if (Module['postRun']) { |
| if (typeof Module['postRun'] == 'function') Module['postRun'] = [Module['postRun']]; |
| while (Module['postRun'].length) { |
| addOnPostRun(Module['postRun'].shift()); |
| } |
| } |
| |
| // Begin ATPOSTRUNS hooks |
| callRuntimeCallbacks(onPostRuns); |
| // End ATPOSTRUNS hooks |
| } |
| |
| // A counter of dependencies for calling run(). If we need to |
| // do asynchronous work before running, increment this and |
| // decrement it. Incrementing must happen in a place like |
| // Module.preRun (used by emcc to add file preloading). |
| // Note that you can add dependencies in preRun, even though |
| // it happens right before run - run will be postponed until |
| // the dependencies are met. |
| var runDependencies = 0; |
| var dependenciesFulfilled = null; // overridden to take different actions when all run dependencies are fulfilled |
| |
| function addRunDependency(id) { |
| runDependencies++; |
| |
| Module['monitorRunDependencies']?.(runDependencies); |
| |
| } |
| |
| function removeRunDependency(id) { |
| runDependencies--; |
| |
| Module['monitorRunDependencies']?.(runDependencies); |
| |
| if (runDependencies == 0) { |
| if (dependenciesFulfilled) { |
| var callback = dependenciesFulfilled; |
| dependenciesFulfilled = null; |
| callback(); // can add another dependenciesFulfilled |
| } |
| } |
| } |
| |
| /** @param {string|number=} what */ |
| function abort(what) { |
| Module['onAbort']?.(what); |
| |
| what = 'Aborted(' + what + ')'; |
| // TODO(sbc): Should we remove printing and leave it up to whoever |
| // catches the exception? |
| err(what); |
| |
| ABORT = true; |
| |
| what += '. Build with -sASSERTIONS for more info.'; |
| |
| // Use a wasm runtime error, because a JS error might be seen as a foreign |
| // exception, which means we'd run destructors on it. We need the error to |
| // simply make the program stop. |
| // FIXME This approach does not work in Wasm EH because it currently does not assume |
| // all RuntimeErrors are from traps; it decides whether a RuntimeError is from |
| // a trap or not based on a hidden field within the object. So at the moment |
| // we don't have a way of throwing a wasm trap from JS. TODO Make a JS API that |
| // allows this in the wasm spec. |
| |
| // Suppress closure compiler warning here. Closure compiler's builtin extern |
| // definition for WebAssembly.RuntimeError claims it takes no arguments even |
| // though it can. |
| // TODO(https://github.com/google/closure-compiler/pull/3913): Remove if/when upstream closure gets fixed. |
| /** @suppress {checkTypes} */ |
| var e = new WebAssembly.RuntimeError(what); |
| |
| readyPromiseReject?.(e); |
| // Throw the error whether or not MODULARIZE is set because abort is used |
| // in code paths apart from instantiation where an exception is expected |
| // to be thrown when abort is called. |
| throw e; |
| } |
| |
| var wasmBinaryFile; |
| |
| function findWasmBinary() { |
| return locateFile('basis_encoder.wasm'); |
| } |
| |
| function getBinarySync(file) { |
| if (file == wasmBinaryFile && wasmBinary) { |
| return new Uint8Array(wasmBinary); |
| } |
| if (readBinary) { |
| return readBinary(file); |
| } |
| throw 'both async and sync fetching of the wasm failed'; |
| } |
| |
| async function getWasmBinary(binaryFile) { |
| // If we don't have the binary yet, load it asynchronously using readAsync. |
| if (!wasmBinary) { |
| // Fetch the binary using readAsync |
| try { |
| var response = await readAsync(binaryFile); |
| return new Uint8Array(response); |
| } catch { |
| // Fall back to getBinarySync below; |
| } |
| } |
| |
| // Otherwise, getBinarySync should be able to get it synchronously |
| return getBinarySync(binaryFile); |
| } |
| |
| async function instantiateArrayBuffer(binaryFile, imports) { |
| try { |
| var binary = await getWasmBinary(binaryFile); |
| var instance = await WebAssembly.instantiate(binary, imports); |
| return instance; |
| } catch (reason) { |
| err(`failed to asynchronously prepare wasm: ${reason}`); |
| |
| abort(reason); |
| } |
| } |
| |
| async function instantiateAsync(binary, binaryFile, imports) { |
| if (!binary && typeof WebAssembly.instantiateStreaming == 'function' |
| // Don't use streaming for file:// delivered objects in a webview, fetch them synchronously. |
| && !isFileURI(binaryFile) |
| // Avoid instantiateStreaming() on Node.js environment for now, as while |
| // Node.js v18.1.0 implements it, it does not have a full fetch() |
| // implementation yet. |
| // |
| // Reference: |
| // https://github.com/emscripten-core/emscripten/pull/16917 |
| && !ENVIRONMENT_IS_NODE |
| ) { |
| try { |
| var response = fetch(binaryFile, { credentials: 'same-origin' }); |
| var instantiationResult = await WebAssembly.instantiateStreaming(response, imports); |
| return instantiationResult; |
| } catch (reason) { |
| // We expect the most common failure cause to be a bad MIME type for the binary, |
| // in which case falling back to ArrayBuffer instantiation should work. |
| err(`wasm streaming compile failed: ${reason}`); |
| err('falling back to ArrayBuffer instantiation'); |
// fall back to instantiateArrayBuffer below
}
| } |
| return instantiateArrayBuffer(binaryFile, imports); |
| } |
| |
| function getWasmImports() { |
| // prepare imports |
| return { |
| 'env': wasmImports, |
| 'wasi_snapshot_preview1': wasmImports, |
| } |
| } |
| |
| // Create the wasm instance. |
| // Receives the wasm imports, returns the exports. |
| async function createWasm() { |
// Load the wasm module and create an instance of it using native support in the JS engine.
| // handle a generated wasm instance, receiving its exports and |
| // performing other necessary setup |
| /** @param {WebAssembly.Module=} module*/ |
| function receiveInstance(instance, module) { |
| wasmExports = instance.exports; |
| |
| |
| |
| wasmMemory = wasmExports['memory']; |
| |
| updateMemoryViews(); |
| |
| wasmTable = wasmExports['__indirect_function_table']; |
| |
| |
| assignWasmExports(wasmExports); |
| removeRunDependency('wasm-instantiate'); |
| return wasmExports; |
| } |
| // wait for the pthread pool (if any) |
| addRunDependency('wasm-instantiate'); |
| |
| // Prefer streaming instantiation if available. |
| function receiveInstantiationResult(result) { |
| // 'result' is a ResultObject object which has both the module and instance. |
| // receiveInstance() will swap in the exports (to Module.asm) so they can be called |
| // TODO: Due to Closure regression https://github.com/google/closure-compiler/issues/3193, the above line no longer optimizes out down to the following line. |
| // When the regression is fixed, can restore the above PTHREADS-enabled path. |
| return receiveInstance(result['instance']); |
| } |
| |
| var info = getWasmImports(); |
| |
| // User shell pages can write their own Module.instantiateWasm = function(imports, successCallback) callback |
| // to manually instantiate the Wasm module themselves. This allows pages to |
| // run the instantiation parallel to any other async startup actions they are |
| // performing. |
| // Also pthreads and wasm workers initialize the wasm instance through this |
| // path. |
| if (Module['instantiateWasm']) { |
| return new Promise((resolve, reject) => { |
Module['instantiateWasm'](info, (instance, module) => {
resolve(receiveInstance(instance, module));
| }); |
| }); |
| } |
| |
| wasmBinaryFile ??= findWasmBinary(); |
| var result = await instantiateAsync(wasmBinary, wasmBinaryFile, info); |
| var exports = receiveInstantiationResult(result); |
| return exports; |
| } |
| |
| // end include: preamble.js |
| |
| // Begin JS library code |
| |
| |
| class ExitStatus { |
| name = 'ExitStatus'; |
| constructor(status) { |
| this.message = `Program terminated with exit(${status})`; |
| this.status = status; |
| } |
| } |
| |
| var callRuntimeCallbacks = (callbacks) => { |
| while (callbacks.length > 0) { |
| // Pass the module as the first argument. |
| callbacks.shift()(Module); |
| } |
| }; |
| var onPostRuns = []; |
| var addOnPostRun = (cb) => onPostRuns.push(cb); |
| |
| var onPreRuns = []; |
| var addOnPreRun = (cb) => onPreRuns.push(cb); |
| |
| |
| |
| /** |
| * @param {number} ptr |
| * @param {string} type |
| */ |
| function getValue(ptr, type = 'i8') { |
| if (type.endsWith('*')) type = '*'; |
| switch (type) { |
| case 'i1': return HEAP8[ptr]; |
| case 'i8': return HEAP8[ptr]; |
| case 'i16': return HEAP16[((ptr)>>1)]; |
| case 'i32': return HEAP32[((ptr)>>2)]; |
| case 'i64': return HEAP64[((ptr)>>3)]; |
| case 'float': return HEAPF32[((ptr)>>2)]; |
| case 'double': return HEAPF64[((ptr)>>3)]; |
| case '*': return HEAPU32[((ptr)>>2)]; |
| default: abort(`invalid type for getValue: ${type}`); |
| } |
| } |
| |
| var noExitRuntime = true; |
| |
| |
| /** |
| * @param {number} ptr |
| * @param {number} value |
| * @param {string} type |
| */ |
| function setValue(ptr, value, type = 'i8') { |
| if (type.endsWith('*')) type = '*'; |
| switch (type) { |
| case 'i1': HEAP8[ptr] = value; break; |
| case 'i8': HEAP8[ptr] = value; break; |
| case 'i16': HEAP16[((ptr)>>1)] = value; break; |
| case 'i32': HEAP32[((ptr)>>2)] = value; break; |
| case 'i64': HEAP64[((ptr)>>3)] = BigInt(value); break; |
| case 'float': HEAPF32[((ptr)>>2)] = value; break; |
| case 'double': HEAPF64[((ptr)>>3)] = value; break; |
| case '*': HEAPU32[((ptr)>>2)] = value; break; |
| default: abort(`invalid type for setValue: ${type}`); |
| } |
| } |
| |
| var stackRestore = (val) => __emscripten_stack_restore(val); |
| |
| var stackSave = () => _emscripten_stack_get_current(); |
| |
| class ExceptionInfo { |
| // excPtr - Thrown object pointer to wrap. Metadata pointer is calculated from it. |
| constructor(excPtr) { |
| this.excPtr = excPtr; |
| this.ptr = excPtr - 24; |
| } |
| |
| set_type(type) { |
| HEAPU32[(((this.ptr)+(4))>>2)] = type; |
| } |
| |
| get_type() { |
| return HEAPU32[(((this.ptr)+(4))>>2)]; |
| } |
| |
| set_destructor(destructor) { |
| HEAPU32[(((this.ptr)+(8))>>2)] = destructor; |
| } |
| |
| get_destructor() { |
| return HEAPU32[(((this.ptr)+(8))>>2)]; |
| } |
| |
| set_caught(caught) { |
| caught = caught ? 1 : 0; |
| HEAP8[(this.ptr)+(12)] = caught; |
| } |
| |
| get_caught() { |
| return HEAP8[(this.ptr)+(12)] != 0; |
| } |
| |
| set_rethrown(rethrown) { |
| rethrown = rethrown ? 1 : 0; |
| HEAP8[(this.ptr)+(13)] = rethrown; |
| } |
| |
| get_rethrown() { |
| return HEAP8[(this.ptr)+(13)] != 0; |
| } |
| |
| // Initialize native structure fields. Should be called once after allocated. |
| init(type, destructor) { |
| this.set_adjusted_ptr(0); |
| this.set_type(type); |
| this.set_destructor(destructor); |
| } |
| |
| set_adjusted_ptr(adjustedPtr) { |
| HEAPU32[(((this.ptr)+(16))>>2)] = adjustedPtr; |
| } |
| |
| get_adjusted_ptr() { |
| return HEAPU32[(((this.ptr)+(16))>>2)]; |
| } |
| } |
| |
| var exceptionLast = 0; |
| |
| var uncaughtExceptionCount = 0; |
| var ___cxa_throw = (ptr, type, destructor) => { |
| var info = new ExceptionInfo(ptr); |
| // Initialize ExceptionInfo content after it was allocated in __cxa_allocate_exception. |
| info.init(type, destructor); |
| exceptionLast = ptr; |
| uncaughtExceptionCount++; |
| throw exceptionLast; |
| }; |
| |
| /** @suppress {duplicate } */ |
| var syscallGetVarargI = () => { |
| // the `+` prepended here is necessary to convince the JSCompiler that varargs is indeed a number. |
| var ret = HEAP32[((+SYSCALLS.varargs)>>2)]; |
| SYSCALLS.varargs += 4; |
| return ret; |
| }; |
| var syscallGetVarargP = syscallGetVarargI; |
| |
| |
| var PATH = { |
| isAbs:(path) => path.charAt(0) === '/', |
| splitPath:(filename) => { |
| var splitPathRe = /^(\/?|)([\s\S]*?)((?:\.{1,2}|[^\/]+?|)(\.[^.\/]*|))(?:[\/]*)$/; |
| return splitPathRe.exec(filename).slice(1); |
| }, |
| normalizeArray:(parts, allowAboveRoot) => { |
| // if the path tries to go above the root, `up` ends up > 0 |
| var up = 0; |
| for (var i = parts.length - 1; i >= 0; i--) { |
| var last = parts[i]; |
| if (last === '.') { |
| parts.splice(i, 1); |
| } else if (last === '..') { |
| parts.splice(i, 1); |
| up++; |
| } else if (up) { |
| parts.splice(i, 1); |
| up--; |
| } |
| } |
| // if the path is allowed to go above the root, restore leading ..s |
| if (allowAboveRoot) { |
| for (; up; up--) { |
| parts.unshift('..'); |
| } |
| } |
| return parts; |
| }, |
| normalize:(path) => { |
| var isAbsolute = PATH.isAbs(path), |
| trailingSlash = path.slice(-1) === '/'; |
| // Normalize the path |
| path = PATH.normalizeArray(path.split('/').filter((p) => !!p), !isAbsolute).join('/'); |
| if (!path && !isAbsolute) { |
| path = '.'; |
| } |
| if (path && trailingSlash) { |
| path += '/'; |
| } |
| return (isAbsolute ? '/' : '') + path; |
| }, |
| dirname:(path) => { |
| var result = PATH.splitPath(path), |
| root = result[0], |
| dir = result[1]; |
| if (!root && !dir) { |
| // No dirname whatsoever |
| return '.'; |
| } |
| if (dir) { |
| // It has a dirname, strip trailing slash |
| dir = dir.slice(0, -1); |
| } |
| return root + dir; |
| }, |
| basename:(path) => path && path.match(/([^\/]+|\/)\/*$/)[1], |
| join:(...paths) => PATH.normalize(paths.join('/')), |
| join2:(l, r) => PATH.normalize(l + '/' + r), |
| }; |
| |
| var initRandomFill = () => { |
| // This block is not needed on v19+ since crypto.getRandomValues is builtin |
| if (ENVIRONMENT_IS_NODE) { |
| var nodeCrypto = require('crypto'); |
| return (view) => nodeCrypto.randomFillSync(view); |
| } |
| |
| return (view) => crypto.getRandomValues(view); |
| }; |
| var randomFill = (view) => { |
| // Lazily init on the first invocation. |
| (randomFill = initRandomFill())(view); |
| }; |
| |
| |
| |
| var PATH_FS = { |
| resolve:(...args) => { |
| var resolvedPath = '', |
| resolvedAbsolute = false; |
| for (var i = args.length - 1; i >= -1 && !resolvedAbsolute; i--) { |
| var path = (i >= 0) ? args[i] : FS.cwd(); |
| // Skip empty and invalid entries |
| if (typeof path != 'string') { |
| throw new TypeError('Arguments to path.resolve must be strings'); |
| } else if (!path) { |
| return ''; // an invalid portion invalidates the whole thing |
| } |
| resolvedPath = path + '/' + resolvedPath; |
| resolvedAbsolute = PATH.isAbs(path); |
| } |
| // At this point the path should be resolved to a full absolute path, but |
| // handle relative paths to be safe (might happen when process.cwd() fails) |
| resolvedPath = PATH.normalizeArray(resolvedPath.split('/').filter((p) => !!p), !resolvedAbsolute).join('/'); |
| return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.'; |
| }, |
| relative:(from, to) => { |
| from = PATH_FS.resolve(from).slice(1); |
| to = PATH_FS.resolve(to).slice(1); |
| function trim(arr) { |
| var start = 0; |
| for (; start < arr.length; start++) { |
| if (arr[start] !== '') break; |
| } |
| var end = arr.length - 1; |
| for (; end >= 0; end--) { |
| if (arr[end] !== '') break; |
| } |
| if (start > end) return []; |
| return arr.slice(start, end - start + 1); |
| } |
| var fromParts = trim(from.split('/')); |
| var toParts = trim(to.split('/')); |
| var length = Math.min(fromParts.length, toParts.length); |
| var samePartsLength = length; |
| for (var i = 0; i < length; i++) { |
| if (fromParts[i] !== toParts[i]) { |
| samePartsLength = i; |
| break; |
| } |
| } |
| var outputParts = []; |
| for (var i = samePartsLength; i < fromParts.length; i++) { |
| outputParts.push('..'); |
| } |
| outputParts = outputParts.concat(toParts.slice(samePartsLength)); |
| return outputParts.join('/'); |
| }, |
| }; |
| |
| |
| var UTF8Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder() : undefined; |
| |
| /** |
| * Given a pointer 'idx' to a null-terminated UTF8-encoded string in the given |
| * array that contains uint8 values, returns a copy of that string as a |
| * Javascript String object. |
| * heapOrArray is either a regular array, or a JavaScript typed array view. |
| * @param {number=} idx |
| * @param {number=} maxBytesToRead |
| * @return {string} |
| */ |
| var UTF8ArrayToString = (heapOrArray, idx = 0, maxBytesToRead = NaN) => { |
| var endIdx = idx + maxBytesToRead; |
| var endPtr = idx; |
| // TextDecoder needs to know the byte length in advance, it doesn't stop on |
| // null terminator by itself. Also, use the length info to avoid running tiny |
| // strings through TextDecoder, since .subarray() allocates garbage. |
// (As a tiny code-size trick, compare endPtr against endIdx using a negation,
// so that undefined/NaN means Infinity.)
| while (heapOrArray[endPtr] && !(endPtr >= endIdx)) ++endPtr; |
| |
| // When using conditional TextDecoder, skip it for short strings as the overhead of the native call is not worth it. |
| if (endPtr - idx > 16 && heapOrArray.buffer && UTF8Decoder) { |
| return UTF8Decoder.decode(heapOrArray.subarray(idx, endPtr)); |
| } |
| var str = ''; |
| // If building with TextDecoder, we have already computed the string length |
| // above, so test loop end condition against that |
| while (idx < endPtr) { |
| // For UTF8 byte structure, see: |
| // http://en.wikipedia.org/wiki/UTF-8#Description |
| // https://www.ietf.org/rfc/rfc2279.txt |
| // https://tools.ietf.org/html/rfc3629 |
| var u0 = heapOrArray[idx++]; |
| if (!(u0 & 0x80)) { str += String.fromCharCode(u0); continue; } |
| var u1 = heapOrArray[idx++] & 63; |
| if ((u0 & 0xE0) == 0xC0) { str += String.fromCharCode(((u0 & 31) << 6) | u1); continue; } |
| var u2 = heapOrArray[idx++] & 63; |
| if ((u0 & 0xF0) == 0xE0) { |
| u0 = ((u0 & 15) << 12) | (u1 << 6) | u2; |
| } else { |
| u0 = ((u0 & 7) << 18) | (u1 << 12) | (u2 << 6) | (heapOrArray[idx++] & 63); |
| } |
| |
| if (u0 < 0x10000) { |
| str += String.fromCharCode(u0); |
| } else { |
| var ch = u0 - 0x10000; |
| str += String.fromCharCode(0xD800 | (ch >> 10), 0xDC00 | (ch & 0x3FF)); |
| } |
| } |
| return str; |
| }; |
| |
| var FS_stdin_getChar_buffer = []; |
| |
| var lengthBytesUTF8 = (str) => { |
| var len = 0; |
| for (var i = 0; i < str.length; ++i) { |
| // Gotcha: charCodeAt returns a 16-bit word that is a UTF-16 encoded code |
| // unit, not a Unicode code point of the character! So decode |
| // UTF16->UTF32->UTF8. |
| // See http://unicode.org/faq/utf_bom.html#utf16-3 |
| var c = str.charCodeAt(i); // possibly a lead surrogate |
| if (c <= 0x7F) { |
| len++; |
| } else if (c <= 0x7FF) { |
| len += 2; |
| } else if (c >= 0xD800 && c <= 0xDFFF) { |
| len += 4; ++i; |
| } else { |
| len += 3; |
| } |
| } |
| return len; |
| }; |
| |
| var stringToUTF8Array = (str, heap, outIdx, maxBytesToWrite) => { |
| // Parameter maxBytesToWrite is not optional. Negative values, 0, null, |
| // undefined and false each don't write out any bytes. |
| if (!(maxBytesToWrite > 0)) |
| return 0; |
| |
| var startIdx = outIdx; |
| var endIdx = outIdx + maxBytesToWrite - 1; // -1 for string null terminator. |
| for (var i = 0; i < str.length; ++i) { |
| // For UTF8 byte structure, see http://en.wikipedia.org/wiki/UTF-8#Description |
| // and https://www.ietf.org/rfc/rfc2279.txt |
| // and https://tools.ietf.org/html/rfc3629 |
| var u = str.codePointAt(i); |
| if (u <= 0x7F) { |
| if (outIdx >= endIdx) break; |
| heap[outIdx++] = u; |
| } else if (u <= 0x7FF) { |
| if (outIdx + 1 >= endIdx) break; |
| heap[outIdx++] = 0xC0 | (u >> 6); |
| heap[outIdx++] = 0x80 | (u & 63); |
| } else if (u <= 0xFFFF) { |
| if (outIdx + 2 >= endIdx) break; |
| heap[outIdx++] = 0xE0 | (u >> 12); |
| heap[outIdx++] = 0x80 | ((u >> 6) & 63); |
| heap[outIdx++] = 0x80 | (u & 63); |
| } else { |
| if (outIdx + 3 >= endIdx) break; |
| heap[outIdx++] = 0xF0 | (u >> 18); |
| heap[outIdx++] = 0x80 | ((u >> 12) & 63); |
| heap[outIdx++] = 0x80 | ((u >> 6) & 63); |
| heap[outIdx++] = 0x80 | (u & 63); |
| // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. |
| // We need to manually skip over the second code unit for correct iteration. |
| i++; |
| } |
| } |
// Null-terminate the string written to the buffer.
| heap[outIdx] = 0; |
| return outIdx - startIdx; |
| }; |
| /** @type {function(string, boolean=, number=)} */ |
| var intArrayFromString = (stringy, dontAddNull, length) => { |
| var len = length > 0 ? length : lengthBytesUTF8(stringy)+1; |
| var u8array = new Array(len); |
| var numBytesWritten = stringToUTF8Array(stringy, u8array, 0, u8array.length); |
| if (dontAddNull) u8array.length = numBytesWritten; |
| return u8array; |
| }; |
| var FS_stdin_getChar = () => { |
| if (!FS_stdin_getChar_buffer.length) { |
| var result = null; |
| if (ENVIRONMENT_IS_NODE) { |
| // we will read data by chunks of BUFSIZE |
| var BUFSIZE = 256; |
| var buf = Buffer.alloc(BUFSIZE); |
| var bytesRead = 0; |
| |
| // For some reason we must suppress a closure warning here, even though |
| // fd definitely exists on process.stdin, and is even the proper way to |
| // get the fd of stdin, |
| // https://github.com/nodejs/help/issues/2136#issuecomment-523649904 |
| // This started to happen after moving this logic out of library_tty.js, |
| // so it is related to the surrounding code in some unclear manner. |
| /** @suppress {missingProperties} */ |
| var fd = process.stdin.fd; |
| |
| try { |
| bytesRead = fs.readSync(fd, buf, 0, BUFSIZE); |
| } catch(e) { |
| // Cross-platform differences: on Windows, reading EOF throws an |
// exception, but on other OSes, reading EOF returns 0. Unify the
// behavior by treating the EOF exception as a 0-byte read.
| if (e.toString().includes('EOF')) bytesRead = 0; |
| else throw e; |
| } |
| |
| if (bytesRead > 0) { |
| result = buf.slice(0, bytesRead).toString('utf-8'); |
| } |
| } else |
| if (typeof window != 'undefined' && |
| typeof window.prompt == 'function') { |
| // Browser. |
| result = window.prompt('Input: '); // returns null on cancel |
| if (result !== null) { |
| result += '\n'; |
| } |
| } else |
| {} |
| if (!result) { |
| return null; |
| } |
| FS_stdin_getChar_buffer = intArrayFromString(result, true); |
| } |
| return FS_stdin_getChar_buffer.shift(); |
| }; |
| var TTY = { |
| ttys:[], |
| init() { |
| // https://github.com/emscripten-core/emscripten/pull/1555 |
| // if (ENVIRONMENT_IS_NODE) { |
| // // currently, FS.init does not distinguish if process.stdin is a file or TTY |
| // // device, it always assumes it's a TTY device. because of this, we're forcing |
| // // process.stdin to UTF8 encoding to at least make stdin reading compatible |
| // // with text files until FS.init can be refactored. |
| // process.stdin.setEncoding('utf8'); |
| // } |
| }, |
| shutdown() { |
| // https://github.com/emscripten-core/emscripten/pull/1555 |
| // if (ENVIRONMENT_IS_NODE) { |
| // // inolen: any idea as to why node -e 'process.stdin.read()' wouldn't exit immediately (with process.stdin being a tty)? |
| // // isaacs: because now it's reading from the stream, you've expressed interest in it, so that read() kicks off a _read() which creates a ReadReq operation |
| // // inolen: I thought read() in that case was a synchronous operation that just grabbed some amount of buffered data if it exists? |
| // // isaacs: it is. but it also triggers a _read() call, which calls readStart() on the handle |
| // // isaacs: do process.stdin.pause() and i'd think it'd probably close the pending call |
| // process.stdin.pause(); |
| // } |
| }, |
| register(dev, ops) { |
| TTY.ttys[dev] = { input: [], output: [], ops: ops }; |
| FS.registerDevice(dev, TTY.stream_ops); |
| }, |
| stream_ops:{ |
| open(stream) { |
| var tty = TTY.ttys[stream.node.rdev]; |
| if (!tty) { |
| throw new FS.ErrnoError(43); |
| } |
| stream.tty = tty; |
| stream.seekable = false; |
| }, |
| close(stream) { |
| // flush any pending line data |
| stream.tty.ops.fsync(stream.tty); |
| }, |
| fsync(stream) { |
| stream.tty.ops.fsync(stream.tty); |
| }, |
| read(stream, buffer, offset, length, pos /* ignored */) { |
| if (!stream.tty || !stream.tty.ops.get_char) { |
| throw new FS.ErrnoError(60); |
| } |
| var bytesRead = 0; |
| for (var i = 0; i < length; i++) { |
| var result; |
| try { |
| result = stream.tty.ops.get_char(stream.tty); |
| } catch (e) { |
| throw new FS.ErrnoError(29); |
| } |
| if (result === undefined && bytesRead === 0) { |
| throw new FS.ErrnoError(6); |
| } |
| if (result === null || result === undefined) break; |
| bytesRead++; |
| buffer[offset+i] = result; |
| } |
| if (bytesRead) { |
| stream.node.atime = Date.now(); |
| } |
| return bytesRead; |
| }, |
| write(stream, buffer, offset, length, pos) { |
| if (!stream.tty || !stream.tty.ops.put_char) { |
| throw new FS.ErrnoError(60); |
| } |
| try { |
| for (var i = 0; i < length; i++) { |
| stream.tty.ops.put_char(stream.tty, buffer[offset+i]); |
| } |
| } catch (e) { |
| throw new FS.ErrnoError(29); |
| } |
| if (length) { |
| stream.node.mtime = stream.node.ctime = Date.now(); |
| } |
| return i; |
| }, |
| }, |
| default_tty_ops:{ |
| get_char(tty) { |
| return FS_stdin_getChar(); |
| }, |
| put_char(tty, val) { |
| if (val === null || val === 10) { |
| out(UTF8ArrayToString(tty.output)); |
| tty.output = []; |
| } else { |
| if (val != 0) tty.output.push(val); // val == 0 would cut text output off in the middle. |
| } |
| }, |
| fsync(tty) { |
| if (tty.output?.length > 0) { |
| out(UTF8ArrayToString(tty.output)); |
| tty.output = []; |
| } |
| }, |
| ioctl_tcgets(tty) { |
| // typical setting |
| return { |
| c_iflag: 25856, |
| c_oflag: 5, |
| c_cflag: 191, |
| c_lflag: 35387, |
| c_cc: [ |
| 0x03, 0x1c, 0x7f, 0x15, 0x04, 0x00, 0x01, 0x00, 0x11, 0x13, 0x1a, 0x00, |
| 0x12, 0x0f, 0x17, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
| 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, |
| ] |
| }; |
| }, |
| ioctl_tcsets(tty, optional_actions, data) { |
| // currently just ignore |
| return 0; |
| }, |
| ioctl_tiocgwinsz(tty) { |
| return [24, 80]; |
| }, |
| }, |
| default_tty1_ops:{ |
| put_char(tty, val) { |
| if (val === null || val === 10) { |
| err(UTF8ArrayToString(tty.output)); |
| tty.output = []; |
| } else { |
| if (val != 0) tty.output.push(val); |
| } |
| }, |
| fsync(tty) { |
| if (tty.output?.length > 0) { |
| err(UTF8ArrayToString(tty.output)); |
| tty.output = []; |
| } |
| }, |
| }, |
| }; |
| |
| |
| var zeroMemory = (ptr, size) => HEAPU8.fill(0, ptr, ptr + size); |
| |
| var alignMemory = (size, alignment) => { |
| return Math.ceil(size / alignment) * alignment; |
| }; |
| var mmapAlloc = (size) => { |
| size = alignMemory(size, 65536); |
| var ptr = _emscripten_builtin_memalign(65536, size); |
| if (ptr) zeroMemory(ptr, size); |
| return ptr; |
| }; |
| var MEMFS = { |
| ops_table:null, |
| mount(mount) { |
| return MEMFS.createNode(null, '/', 16895, 0); |
| }, |
| createNode(parent, name, mode, dev) { |
| if (FS.isBlkdev(mode) || FS.isFIFO(mode)) { |
// not supported
| throw new FS.ErrnoError(63); |
| } |
| MEMFS.ops_table ||= { |
| dir: { |
| node: { |
| getattr: MEMFS.node_ops.getattr, |
| setattr: MEMFS.node_ops.setattr, |
| lookup: MEMFS.node_ops.lookup, |
| mknod: MEMFS.node_ops.mknod, |
| rename: MEMFS.node_ops.rename, |
| unlink: MEMFS.node_ops.unlink, |
| rmdir: MEMFS.node_ops.rmdir, |
| readdir: MEMFS.node_ops.readdir, |
| symlink: MEMFS.node_ops.symlink |
| }, |
| stream: { |
| llseek: MEMFS.stream_ops.llseek |
| } |
| }, |
| file: { |
| node: { |
| getattr: MEMFS.node_ops.getattr, |
| setattr: MEMFS.node_ops.setattr |
| }, |
| stream: { |
| llseek: MEMFS.stream_ops.llseek, |
| read: MEMFS.stream_ops.read, |
| write: MEMFS.stream_ops.write, |
| mmap: MEMFS.stream_ops.mmap, |
| msync: MEMFS.stream_ops.msync |
| } |
| }, |
| link: { |
| node: { |
| getattr: MEMFS.node_ops.getattr, |
| setattr: MEMFS.node_ops.setattr, |
| readlink: MEMFS.node_ops.readlink |
| }, |
| stream: {} |
| }, |
| chrdev: { |
| node: { |
| getattr: MEMFS.node_ops.getattr, |
| setattr: MEMFS.node_ops.setattr |
| }, |
| stream: FS.chrdev_stream_ops |
| } |
| }; |
| var node = FS.createNode(parent, name, mode, dev); |
| if (FS.isDir(node.mode)) { |
| node.node_ops = MEMFS.ops_table.dir.node; |
| node.stream_ops = MEMFS.ops_table.dir.stream; |
| node.contents = {}; |
| } else if (FS.isFile(node.mode)) { |
| node.node_ops = MEMFS.ops_table.file.node; |
| node.stream_ops = MEMFS.ops_table.file.stream; |
| node.usedBytes = 0; // The actual number of bytes used in the typed array, as opposed to contents.length which gives the whole capacity. |
| // When the byte data of the file is populated, this will point to either a typed array, or a normal JS array. Typed arrays are preferred |
// for performance, and used by default. However, typed arrays are not resizable like normal JS arrays are, so there is a small size
// penalty for appending file writes that continuously grow a file, similar to the capacity-vs-used scheme of std::vector.
| node.contents = null; |
| } else if (FS.isLink(node.mode)) { |
| node.node_ops = MEMFS.ops_table.link.node; |
| node.stream_ops = MEMFS.ops_table.link.stream; |
| } else if (FS.isChrdev(node.mode)) { |
| node.node_ops = MEMFS.ops_table.chrdev.node; |
| node.stream_ops = MEMFS.ops_table.chrdev.stream; |
| } |
| node.atime = node.mtime = node.ctime = Date.now(); |
| // add the new node to the parent |
| if (parent) { |
| parent.contents[name] = node; |
| parent.atime = parent.mtime = parent.ctime = node.atime; |
| } |
| return node; |
| }, |
| getFileDataAsTypedArray(node) { |
| if (!node.contents) return new Uint8Array(0); |
| if (node.contents.subarray) return node.contents.subarray(0, node.usedBytes); // Make sure to not return excess unused bytes. |
| return new Uint8Array(node.contents); |
| }, |
| expandFileStorage(node, newCapacity) { |
| var prevCapacity = node.contents ? node.contents.length : 0; |
| if (prevCapacity >= newCapacity) return; // No need to expand, the storage was already large enough. |
| // Don't expand strictly to the given requested limit if it's only a very small increase, but instead geometrically grow capacity. |
| // For small filesizes (<1MB), perform size*2 geometric increase, but for large sizes, do a much more conservative size*1.125 increase to |
| // avoid overshooting the allocation cap by a very large margin. |
| var CAPACITY_DOUBLING_MAX = 1024 * 1024; |
| newCapacity = Math.max(newCapacity, (prevCapacity * (prevCapacity < CAPACITY_DOUBLING_MAX ? 2.0 : 1.125)) >>> 0); |
| if (prevCapacity != 0) newCapacity = Math.max(newCapacity, 256); // At minimum allocate 256b for each file when expanding. |
| var oldContents = node.contents; |
| node.contents = new Uint8Array(newCapacity); // Allocate new storage. |
| if (node.usedBytes > 0) node.contents.set(oldContents.subarray(0, node.usedBytes), 0); // Copy old data over to the new storage. |
| }, |
| resizeFileStorage(node, newSize) { |
| if (node.usedBytes == newSize) return; |
| if (newSize == 0) { |
| node.contents = null; // Fully decommit when requesting a resize to zero. |
| node.usedBytes = 0; |
| } else { |
| var oldContents = node.contents; |
| node.contents = new Uint8Array(newSize); // Allocate new storage. |
| if (oldContents) { |
| node.contents.set(oldContents.subarray(0, Math.min(newSize, node.usedBytes))); // Copy old data over to the new storage. |
| } |
| node.usedBytes = newSize; |
| } |
| }, |
| node_ops:{ |
| getattr(node) { |
| var attr = {}; |
| // device numbers reuse inode numbers. |
| attr.dev = FS.isChrdev(node.mode) ? node.id : 1; |
| attr.ino = node.id; |
| attr.mode = node.mode; |
| attr.nlink = 1; |
| attr.uid = 0; |
| attr.gid = 0; |
| attr.rdev = node.rdev; |
| if (FS.isDir(node.mode)) { |
| attr.size = 4096; |
| } else if (FS.isFile(node.mode)) { |
| attr.size = node.usedBytes; |
| } else if (FS.isLink(node.mode)) { |
| attr.size = node.link.length; |
| } else { |
| attr.size = 0; |
| } |
| attr.atime = new Date(node.atime); |
| attr.mtime = new Date(node.mtime); |
| attr.ctime = new Date(node.ctime); |
| // NOTE: In our implementation, st_blocks = Math.ceil(st_size/st_blksize), |
| // but this is not required by the standard. |
| attr.blksize = 4096; |
| attr.blocks = Math.ceil(attr.size / attr.blksize); |
| return attr; |
| }, |
| setattr(node, attr) { |
| for (const key of ["mode", "atime", "mtime", "ctime"]) { |
| if (attr[key] != null) { |
| node[key] = attr[key]; |
| } |
| } |
| if (attr.size !== undefined) { |
| MEMFS.resizeFileStorage(node, attr.size); |
| } |
| }, |
| lookup(parent, name) { |
| throw MEMFS.doesNotExistError; |
| }, |
| mknod(parent, name, mode, dev) { |
| return MEMFS.createNode(parent, name, mode, dev); |
| }, |
| rename(old_node, new_dir, new_name) { |
| var new_node; |
| try { |
| new_node = FS.lookupNode(new_dir, new_name); |
| } catch (e) {} |
| if (new_node) { |
| if (FS.isDir(old_node.mode)) { |
| // if we're overwriting a directory at new_name, make sure it's empty. |
| for (var i in new_node.contents) { |
| throw new FS.ErrnoError(55); |
| } |
| } |
| FS.hashRemoveNode(new_node); |
| } |
| // do the internal rewiring |
| delete old_node.parent.contents[old_node.name]; |
| new_dir.contents[new_name] = old_node; |
| old_node.name = new_name; |
| new_dir.ctime = new_dir.mtime = old_node.parent.ctime = old_node.parent.mtime = Date.now(); |
| }, |
| unlink(parent, name) { |
| delete parent.contents[name]; |
| parent.ctime = parent.mtime = Date.now(); |
| }, |
| rmdir(parent, name) { |
| var node = FS.lookupNode(parent, name); |
| for (var i in node.contents) { |
| throw new FS.ErrnoError(55); |
| } |
| delete parent.contents[name]; |
| parent.ctime = parent.mtime = Date.now(); |
| }, |
| readdir(node) { |
| return ['.', '..', ...Object.keys(node.contents)]; |
| }, |
| symlink(parent, newname, oldpath) { |
| var node = MEMFS.createNode(parent, newname, 0o777 | 40960, 0); |
| node.link = oldpath; |
| return node; |
| }, |
| readlink(node) { |
| if (!FS.isLink(node.mode)) { |
| throw new FS.ErrnoError(28); |
| } |
| return node.link; |
| }, |
| }, |
| stream_ops:{ |
| read(stream, buffer, offset, length, position) { |
| var contents = stream.node.contents; |
| if (position >= stream.node.usedBytes) return 0; |
| var size = Math.min(stream.node.usedBytes - position, length); |
| if (size > 8 && contents.subarray) { // non-trivial, and typed array |
| buffer.set(contents.subarray(position, position + size), offset); |
| } else { |
| for (var i = 0; i < size; i++) buffer[offset + i] = contents[position + i]; |
| } |
| return size; |
| }, |
| write(stream, buffer, offset, length, position, canOwn) { |
| // If the buffer is located in main memory (HEAP), and if |
| // memory can grow, we can't hold on to references of the |
| // memory buffer, as they may get invalidated. That means we |
// need to copy its contents.
| if (buffer.buffer === HEAP8.buffer) { |
| canOwn = false; |
| } |
| |
| if (!length) return 0; |
| var node = stream.node; |
| node.mtime = node.ctime = Date.now(); |
| |
| if (buffer.subarray && (!node.contents || node.contents.subarray)) { // This write is from a typed array to a typed array? |
| if (canOwn) { |
| node.contents = buffer.subarray(offset, offset + length); |
| node.usedBytes = length; |
| return length; |
| } else if (node.usedBytes === 0 && position === 0) { // If this is a simple first write to an empty file, do a fast set since we don't need to care about old data. |
| node.contents = buffer.slice(offset, offset + length); |
| node.usedBytes = length; |
| return length; |
| } else if (position + length <= node.usedBytes) { // Writing to an already allocated and used subrange of the file? |
| node.contents.set(buffer.subarray(offset, offset + length), position); |
| return length; |
| } |
| } |
| |
| // Appending to an existing file and we need to reallocate, or source data did not come as a typed array. |
| MEMFS.expandFileStorage(node, position+length); |
| if (node.contents.subarray && buffer.subarray) { |
| // Use typed array write which is available. |
| node.contents.set(buffer.subarray(offset, offset + length), position); |
| } else { |
| for (var i = 0; i < length; i++) { |
| node.contents[position + i] = buffer[offset + i]; // Or fall back to manual write if not. |
| } |
| } |
| node.usedBytes = Math.max(node.usedBytes, position + length); |
| return length; |
| }, |
| llseek(stream, offset, whence) { |
| var position = offset; |
| if (whence === 1) { |
| position += stream.position; |
| } else if (whence === 2) { |
| if (FS.isFile(stream.node.mode)) { |
| position += stream.node.usedBytes; |
| } |
| } |
| if (position < 0) { |
| throw new FS.ErrnoError(28); |
| } |
| return position; |
| }, |
| mmap(stream, length, position, prot, flags) { |
| if (!FS.isFile(stream.node.mode)) { |
| throw new FS.ErrnoError(43); |
| } |
| var ptr; |
| var allocated; |
| var contents = stream.node.contents; |
| // Only make a new copy when MAP_PRIVATE is specified. |
| if (!(flags & 2) && contents && contents.buffer === HEAP8.buffer) { |
| // We can't emulate MAP_SHARED when the file is not backed by the |
| // buffer we're mapping to (e.g. the HEAP buffer). |
| allocated = false; |
| ptr = contents.byteOffset; |
| } else { |
| allocated = true; |
| ptr = mmapAlloc(length); |
| if (!ptr) { |
| throw new FS.ErrnoError(48); |
| } |
| if (contents) { |
| // Try to avoid unnecessary slices. |
| if (position > 0 || position + length < contents.length) { |
| if (contents.subarray) { |
| contents = contents.subarray(position, position + length); |
| } else { |
| contents = Array.prototype.slice.call(contents, position, position + length); |
| } |
| } |
| HEAP8.set(contents, ptr); |
| } |
| } |
| return { ptr, allocated }; |
| }, |
| msync(stream, buffer, offset, length, mmapFlags) { |
| MEMFS.stream_ops.write(stream, buffer, 0, length, offset, false); |
| // should we check if bytesWritten and length are the same? |
| return 0; |
| }, |
| }, |
| }; |
| |
| var asyncLoad = async (url) => { |
| var arrayBuffer = await readAsync(url); |
| return new Uint8Array(arrayBuffer); |
| }; |
| |
| |
| var FS_createDataFile = (...args) => FS.createDataFile(...args); |
| |
| var getUniqueRunDependency = (id) => { |
| return id; |
| }; |
| |
| var preloadPlugins = []; |
| var FS_handledByPreloadPlugin = (byteArray, fullname, finish, onerror) => { |
| // Ensure plugins are ready. |
| if (typeof Browser != 'undefined') Browser.init(); |
| |
| var handled = false; |
| preloadPlugins.forEach((plugin) => { |
| if (handled) return; |
| if (plugin['canHandle'](fullname)) { |
| plugin['handle'](byteArray, fullname, finish, onerror); |
| handled = true; |
| } |
| }); |
| return handled; |
| }; |
| var FS_createPreloadedFile = (parent, name, url, canRead, canWrite, onload, onerror, dontCreateFile, canOwn, preFinish) => { |
// TODO we should allow people to just pass in a complete filename instead
// of parent and name, given that we just join them anyway
| var fullname = name ? PATH_FS.resolve(PATH.join2(parent, name)) : parent; |
| var dep = getUniqueRunDependency(`cp ${fullname}`); // might have several active requests for the same fullname |
| function processData(byteArray) { |
| function finish(byteArray) { |
| preFinish?.(); |
| if (!dontCreateFile) { |
| FS_createDataFile(parent, name, byteArray, canRead, canWrite, canOwn); |
| } |
| onload?.(); |
| removeRunDependency(dep); |
| } |
| if (FS_handledByPreloadPlugin(byteArray, fullname, finish, () => { |
| onerror?.(); |
| removeRunDependency(dep); |
| })) { |
| return; |
| } |
| finish(byteArray); |
| } |
| addRunDependency(dep); |
| if (typeof url == 'string') { |
| asyncLoad(url).then(processData, onerror); |
| } else { |
| processData(url); |
| } |
| }; |
| |
| var FS_modeStringToFlags = (str) => { |
| var flagModes = { |
| 'r': 0, |
| 'r+': 2, |
| 'w': 512 | 64 | 1, |
| 'w+': 512 | 64 | 2, |
| 'a': 1024 | 64 | 1, |
| 'a+': 1024 | 64 | 2, |
| }; |
| var flags = flagModes[str]; |
| if (typeof flags == 'undefined') { |
| throw new Error(`Unknown file open mode: ${str}`); |
| } |
| return flags; |
| }; |
| |
| var FS_getMode = (canRead, canWrite) => { |
| var mode = 0; |
| if (canRead) mode |= 292 | 73; |
| if (canWrite) mode |= 146; |
| return mode; |
| }; |
| |
| var FS = { |
| root:null, |
| mounts:[], |
| devices:{ |
| }, |
| streams:[], |
| nextInode:1, |
| nameTable:null, |
| currentPath:"/", |
| initialized:false, |
| ignorePermissions:true, |
| filesystems:null, |
| syncFSRequests:0, |
| readFiles:{ |
| }, |
| ErrnoError:class { |
| name = 'ErrnoError'; |
| // We set the `name` property to be able to identify `FS.ErrnoError` |
| // - the `name` is a standard ECMA-262 property of error objects. Kind of good to have it anyway. |
| // - when using PROXYFS, an error can come from an underlying FS |
| // as different FS objects have their own FS.ErrnoError each, |
| // the test `err instanceof FS.ErrnoError` won't detect an error coming from another filesystem, causing bugs. |
| // we'll use the reliable test `err.name == "ErrnoError"` instead |
| constructor(errno) { |
| this.errno = errno; |
| } |
| }, |
| FSStream:class { |
| shared = {}; |
| get object() { |
| return this.node; |
| } |
| set object(val) { |
| this.node = val; |
| } |
| get isRead() { |
| return (this.flags & 2097155) !== 1; |
| } |
| get isWrite() { |
| return (this.flags & 2097155) !== 0; |
| } |
| get isAppend() { |
| return (this.flags & 1024); |
| } |
| get flags() { |
| return this.shared.flags; |
| } |
| set flags(val) { |
| this.shared.flags = val; |
| } |
| get position() { |
| return this.shared.position; |
| } |
| set position(val) { |
| this.shared.position = val; |
| } |
| }, |
| FSNode:class { |
| node_ops = {}; |
| stream_ops = {}; |
| readMode = 292 | 73; |
| writeMode = 146; |
| mounted = null; |
| constructor(parent, name, mode, rdev) { |
| if (!parent) { |
| parent = this; // root node sets parent to itself |
| } |
| this.parent = parent; |
| this.mount = parent.mount; |
| this.id = FS.nextInode++; |
| this.name = name; |
| this.mode = mode; |
| this.rdev = rdev; |
| this.atime = this.mtime = this.ctime = Date.now(); |
| } |
| get read() { |
| return (this.mode & this.readMode) === this.readMode; |
| } |
| set read(val) { |
| val ? this.mode |= this.readMode : this.mode &= ~this.readMode; |
| } |
| get write() { |
| return (this.mode & this.writeMode) === this.writeMode; |
| } |
| set write(val) { |
| val ? this.mode |= this.writeMode : this.mode &= ~this.writeMode; |
| } |
| get isFolder() { |
| return FS.isDir(this.mode); |
| } |
| get isDevice() { |
| return FS.isChrdev(this.mode); |
| } |
| }, |
| lookupPath(path, opts = {}) { |
| if (!path) { |
| throw new FS.ErrnoError(44); |
| } |
opts.follow_mount ??= true;
| |
| if (!PATH.isAbs(path)) { |
| path = FS.cwd() + '/' + path; |
| } |
| |
| // limit max consecutive symlinks to 40 (SYMLOOP_MAX). |
| linkloop: for (var nlinks = 0; nlinks < 40; nlinks++) { |
| // split the absolute path |
| var parts = path.split('/').filter((p) => !!p); |
| |
| // start at the root |
| var current = FS.root; |
| var current_path = '/'; |
| |
| for (var i = 0; i < parts.length; i++) { |
| var islast = (i === parts.length-1); |
| if (islast && opts.parent) { |
| // stop resolving |
| break; |
| } |
| |
| if (parts[i] === '.') { |
| continue; |
| } |
| |
| if (parts[i] === '..') { |
| current_path = PATH.dirname(current_path); |
| if (FS.isRoot(current)) { |
| path = current_path + '/' + parts.slice(i + 1).join('/'); |
| continue linkloop; |
| } else { |
| current = current.parent; |
| } |
| continue; |
| } |
| |
| current_path = PATH.join2(current_path, parts[i]); |
| try { |
| current = FS.lookupNode(current, parts[i]); |
| } catch (e) { |
| // if noent_okay is true, suppress a ENOENT in the last component |
| // and return an object with an undefined node. This is needed for |
| // resolving symlinks in the path when creating a file. |
| if ((e?.errno === 44) && islast && opts.noent_okay) { |
| return { path: current_path }; |
| } |
| throw e; |
| } |
| |
| // jump to the mount's root node if this is a mountpoint |
| if (FS.isMountpoint(current) && (!islast || opts.follow_mount)) { |
| current = current.mounted.root; |
| } |
| |
| // by default, lookupPath will not follow a symlink if it is the final path component. |
| // setting opts.follow = true will override this behavior. |
| if (FS.isLink(current.mode) && (!islast || opts.follow)) { |
| if (!current.node_ops.readlink) { |
| throw new FS.ErrnoError(52); |
| } |
| var link = current.node_ops.readlink(current); |
| if (!PATH.isAbs(link)) { |
| link = PATH.dirname(current_path) + '/' + link; |
| } |
| path = link + '/' + parts.slice(i + 1).join('/'); |
| continue linkloop; |
| } |
| } |
| return { path: current_path, node: current }; |
| } |
| throw new FS.ErrnoError(32); |
| }, |
| getPath(node) { |
| var path; |
| while (true) { |
| if (FS.isRoot(node)) { |
| var mount = node.mount.mountpoint; |
| if (!path) return mount; |
| return mount[mount.length-1] !== '/' ? `${mount}/${path}` : mount + path; |
| } |
| path = path ? `${node.name}/${path}` : node.name; |
| node = node.parent; |
| } |
| }, |
| hashName(parentid, name) { |
| var hash = 0; |
| |
| for (var i = 0; i < name.length; i++) { |
| hash = ((hash << 5) - hash + name.charCodeAt(i)) | 0; |
| } |
| return ((parentid + hash) >>> 0) % FS.nameTable.length; |
| }, |
| hashAddNode(node) { |
| var hash = FS.hashName(node.parent.id, node.name); |
| node.name_next = FS.nameTable[hash]; |
| FS.nameTable[hash] = node; |
| }, |
| hashRemoveNode(node) { |
| var hash = FS.hashName(node.parent.id, node.name); |
| if (FS.nameTable[hash] === node) { |
| FS.nameTable[hash] = node.name_next; |
| } else { |
| var current = FS.nameTable[hash]; |
| while (current) { |
| if (current.name_next === node) { |
| current.name_next = node.name_next; |
| break; |
| } |
| current = current.name_next; |
| } |
| } |
| }, |
| lookupNode(parent, name) { |
| var errCode = FS.mayLookup(parent); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| var hash = FS.hashName(parent.id, name); |
| for (var node = FS.nameTable[hash]; node; node = node.name_next) { |
| var nodeName = node.name; |
| if (node.parent.id === parent.id && nodeName === name) { |
| return node; |
| } |
| } |
| // if we failed to find it in the cache, call into the VFS |
| return FS.lookup(parent, name); |
| }, |
| createNode(parent, name, mode, rdev) { |
| var node = new FS.FSNode(parent, name, mode, rdev); |
| |
| FS.hashAddNode(node); |
| |
| return node; |
| }, |
| destroyNode(node) { |
| FS.hashRemoveNode(node); |
| }, |
| isRoot(node) { |
| return node === node.parent; |
| }, |
| isMountpoint(node) { |
| return !!node.mounted; |
| }, |
| isFile(mode) { |
| return (mode & 61440) === 32768; |
| }, |
| isDir(mode) { |
| return (mode & 61440) === 16384; |
| }, |
| isLink(mode) { |
| return (mode & 61440) === 40960; |
| }, |
| isChrdev(mode) { |
| return (mode & 61440) === 8192; |
| }, |
| isBlkdev(mode) { |
| return (mode & 61440) === 24576; |
| }, |
| isFIFO(mode) { |
| return (mode & 61440) === 4096; |
| }, |
| isSocket(mode) { |
| return (mode & 49152) === 49152; |
| }, |
| flagsToPermissionString(flag) { |
| var perms = ['r', 'w', 'rw'][flag & 3]; |
| if ((flag & 512)) { |
| perms += 'w'; |
| } |
| return perms; |
| }, |
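| // Worked example: writeFile's default flags 577 = O_WRONLY(1) | O_CREAT(64) |
| // | O_TRUNC(512), so flagsToPermissionString(577) yields 'w' + 'w' == 'ww'. |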
| nodePermissions(node, perms) { |
| if (FS.ignorePermissions) { |
| return 0; |
| } |
| // return 2 (EACCES) if the requested permission is not granted by any of |
| // the user, group or other bits. |
| if (perms.includes('r') && !(node.mode & 292)) { |
| return 2; |
| } else if (perms.includes('w') && !(node.mode & 146)) { |
| return 2; |
| } else if (perms.includes('x') && !(node.mode & 73)) { |
| return 2; |
| } |
| return 0; |
| }, |
| mayLookup(dir) { |
| if (!FS.isDir(dir.mode)) return 54; |
| var errCode = FS.nodePermissions(dir, 'x'); |
| if (errCode) return errCode; |
| if (!dir.node_ops.lookup) return 2; |
| return 0; |
| }, |
| mayCreate(dir, name) { |
| if (!FS.isDir(dir.mode)) { |
| return 54; |
| } |
| try { |
| var node = FS.lookupNode(dir, name); |
| return 20; |
| } catch (e) { |
| } |
| return FS.nodePermissions(dir, 'wx'); |
| }, |
| mayDelete(dir, name, isdir) { |
| var node; |
| try { |
| node = FS.lookupNode(dir, name); |
| } catch (e) { |
| return e.errno; |
| } |
| var errCode = FS.nodePermissions(dir, 'wx'); |
| if (errCode) { |
| return errCode; |
| } |
| if (isdir) { |
| if (!FS.isDir(node.mode)) { |
| return 54; |
| } |
| if (FS.isRoot(node) || FS.getPath(node) === FS.cwd()) { |
| return 10; |
| } |
| } else { |
| if (FS.isDir(node.mode)) { |
| return 31; |
| } |
| } |
| return 0; |
| }, |
| mayOpen(node, flags) { |
| if (!node) { |
| return 44; |
| } |
| if (FS.isLink(node.mode)) { |
| return 32; |
| } else if (FS.isDir(node.mode)) { |
| if (FS.flagsToPermissionString(flags) !== 'r' // opening for write |
| || (flags & (512 | 64))) { // TODO: check for O_SEARCH? (== search for dir only) |
| return 31; |
| } |
| } |
| return FS.nodePermissions(node, FS.flagsToPermissionString(flags)); |
| }, |
| checkOpExists(op, err) { |
| if (!op) { |
| throw new FS.ErrnoError(err); |
| } |
| return op; |
| }, |
| MAX_OPEN_FDS:4096, |
| nextfd() { |
| for (var fd = 0; fd <= FS.MAX_OPEN_FDS; fd++) { |
| if (!FS.streams[fd]) { |
| return fd; |
| } |
| } |
| throw new FS.ErrnoError(33); |
| }, |
| getStreamChecked(fd) { |
| var stream = FS.getStream(fd); |
| if (!stream) { |
| throw new FS.ErrnoError(8); |
| } |
| return stream; |
| }, |
| getStream:(fd) => FS.streams[fd], |
| createStream(stream, fd = -1) { |
| |
| // clone it, so we can return an instance of FSStream |
| stream = Object.assign(new FS.FSStream(), stream); |
| if (fd == -1) { |
| fd = FS.nextfd(); |
| } |
| stream.fd = fd; |
| FS.streams[fd] = stream; |
| return stream; |
| }, |
| closeStream(fd) { |
| FS.streams[fd] = null; |
| }, |
| dupStream(origStream, fd = -1) { |
| var stream = FS.createStream(origStream, fd); |
| stream.stream_ops?.dup?.(stream); |
| return stream; |
| }, |
| doSetAttr(stream, node, attr) { |
| var setattr = stream?.stream_ops.setattr; |
| var arg = setattr ? stream : node; |
| setattr ??= node.node_ops.setattr; |
| FS.checkOpExists(setattr, 63); |
| setattr(arg, attr); |
| }, |
| chrdev_stream_ops:{ |
| open(stream) { |
| var device = FS.getDevice(stream.node.rdev); |
| // override node's stream ops with the device's |
| stream.stream_ops = device.stream_ops; |
| // forward the open call |
| stream.stream_ops.open?.(stream); |
| }, |
| llseek() { |
| throw new FS.ErrnoError(70); |
| }, |
| }, |
| major:(dev) => ((dev) >> 8), |
| minor:(dev) => ((dev) & 0xff), |
| makedev:(ma, mi) => ((ma) << 8 | (mi)), |
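| // Worked example: makedev(1, 3) == (1 << 8) | 3 == 259, the device number |
| // used below for /dev/null; major(259) == 1 and minor(259) == 3 recover it. |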
| registerDevice(dev, ops) { |
| FS.devices[dev] = { stream_ops: ops }; |
| }, |
| getDevice:(dev) => FS.devices[dev], |
| getMounts(mount) { |
| var mounts = []; |
| var check = [mount]; |
| |
| while (check.length) { |
| var m = check.pop(); |
| |
| mounts.push(m); |
| |
| check.push(...m.mounts); |
| } |
| |
| return mounts; |
| }, |
| syncfs(populate, callback) { |
| if (typeof populate == 'function') { |
| callback = populate; |
| populate = false; |
| } |
| |
| FS.syncFSRequests++; |
| |
| if (FS.syncFSRequests > 1) { |
| err(`warning: ${FS.syncFSRequests} FS.syncfs operations in flight at once, probably just doing extra work`); |
| } |
| |
| var mounts = FS.getMounts(FS.root.mount); |
| var completed = 0; |
| |
| function doCallback(errCode) { |
| FS.syncFSRequests--; |
| return callback(errCode); |
| } |
| |
| function done(errCode) { |
| if (errCode) { |
| if (!done.errored) { |
| done.errored = true; |
| return doCallback(errCode); |
| } |
| return; |
| } |
| if (++completed >= mounts.length) { |
| doCallback(null); |
| } |
| }; |
| |
| // sync all mounts |
| mounts.forEach((mount) => { |
| if (!mount.type.syncfs) { |
| return done(null); |
| } |
| mount.type.syncfs(mount, populate, done); |
| }); |
| }, |
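| // Illustrative use of syncfs, assuming a persisting backend (e.g. IDBFS) has |
| // been mounted elsewhere; for pure MEMFS mounts this is effectively a no-op. |
| //   FS.syncfs(/*populate=*/false, (e) => {   // false: push JS state to the backend |
| //     if (e) err('syncfs failed: ' + e); |
| //   }); |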
| mount(type, opts, mountpoint) { |
| var root = mountpoint === '/'; |
| var pseudo = !mountpoint; |
| var node; |
| |
| if (root && FS.root) { |
| throw new FS.ErrnoError(10); |
| } else if (!root && !pseudo) { |
| var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); |
| |
| mountpoint = lookup.path; // use the absolute path |
| node = lookup.node; |
| |
| if (FS.isMountpoint(node)) { |
| throw new FS.ErrnoError(10); |
| } |
| |
| if (!FS.isDir(node.mode)) { |
| throw new FS.ErrnoError(54); |
| } |
| } |
| |
| var mount = { |
| type, |
| opts, |
| mountpoint, |
| mounts: [] |
| }; |
| |
| // create a root node for the fs |
| var mountRoot = type.mount(mount); |
| mountRoot.mount = mount; |
| mount.root = mountRoot; |
| |
| if (root) { |
| FS.root = mountRoot; |
| } else if (node) { |
| // set as a mountpoint |
| node.mounted = mount; |
| |
| // add the new mount to the current mount's children |
| if (node.mount) { |
| node.mount.mounts.push(mount); |
| } |
| } |
| |
| return mountRoot; |
| }, |
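| // Illustrative use of mount: attach a second MEMFS instance at a new |
| // directory (the mountpoint must already exist and be a directory). |
| //   FS.mkdir('/working'); |
| //   FS.mount(MEMFS, {}, '/working'); |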
| unmount(mountpoint) { |
| var lookup = FS.lookupPath(mountpoint, { follow_mount: false }); |
| |
| if (!FS.isMountpoint(lookup.node)) { |
| throw new FS.ErrnoError(28); |
| } |
| |
| // destroy the nodes for this mount, and all its child mounts |
| var node = lookup.node; |
| var mount = node.mounted; |
| var mounts = FS.getMounts(mount); |
| |
| Object.keys(FS.nameTable).forEach((hash) => { |
| var current = FS.nameTable[hash]; |
| |
| while (current) { |
| var next = current.name_next; |
| |
| if (mounts.includes(current.mount)) { |
| FS.destroyNode(current); |
| } |
| |
| current = next; |
| } |
| }); |
| |
| // no longer a mountpoint |
| node.mounted = null; |
| |
| // remove this mount from the child mounts |
| var idx = node.mount.mounts.indexOf(mount); |
| node.mount.mounts.splice(idx, 1); |
| }, |
| lookup(parent, name) { |
| return parent.node_ops.lookup(parent, name); |
| }, |
| mknod(path, mode, dev) { |
| var lookup = FS.lookupPath(path, { parent: true }); |
| var parent = lookup.node; |
| var name = PATH.basename(path); |
| if (!name) { |
| throw new FS.ErrnoError(28); |
| } |
| if (name === '.' || name === '..') { |
| throw new FS.ErrnoError(20); |
| } |
| var errCode = FS.mayCreate(parent, name); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| if (!parent.node_ops.mknod) { |
| throw new FS.ErrnoError(63); |
| } |
| return parent.node_ops.mknod(parent, name, mode, dev); |
| }, |
| statfs(path) { |
| return FS.statfsNode(FS.lookupPath(path, {follow: true}).node); |
| }, |
| statfsStream(stream) { |
| // We keep a separate statfsStream function because noderawfs overrides |
| // it. In noderawfs, stream.node is sometimes null. Instead, we need to |
| // look at stream.path. |
| return FS.statfsNode(stream.node); |
| }, |
| statfsNode(node) { |
| // NOTE: None of the defaults below reflect real filesystem state; they are |
| // just safe, sane placeholder values. Currently nodefs and rawfs replace |
| // these defaults, other file systems leave them alone. |
| var rtn = { |
| bsize: 4096, |
| frsize: 4096, |
| blocks: 1e6, |
| bfree: 5e5, |
| bavail: 5e5, |
| files: FS.nextInode, |
| ffree: FS.nextInode - 1, |
| fsid: 42, |
| flags: 2, |
| namelen: 255, |
| }; |
| |
| if (node.node_ops.statfs) { |
| Object.assign(rtn, node.node_ops.statfs(node.mount.opts.root)); |
| } |
| return rtn; |
| }, |
| create(path, mode = 0o666) { |
| mode &= 4095; |
| mode |= 32768; |
| return FS.mknod(path, mode, 0); |
| }, |
| mkdir(path, mode = 0o777) { |
| mode &= 511 | 512; |
| mode |= 16384; |
| return FS.mknod(path, mode, 0); |
| }, |
| mkdirTree(path, mode) { |
| var dirs = path.split('/'); |
| var d = ''; |
| for (var dir of dirs) { |
| if (!dir) continue; |
| if (d || PATH.isAbs(path)) d += '/'; |
| d += dir; |
| try { |
| FS.mkdir(d, mode); |
| } catch(e) { |
| if (e.errno != 20) throw e; |
| } |
| } |
| }, |
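| // Illustrative use of mkdirTree (hypothetical path): creates each missing |
| // component in turn, ignoring EEXIST (errno 20) for ones that already exist. |
| //   FS.mkdirTree('/data/cache/images'); |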
| mkdev(path, mode, dev) { |
| if (typeof dev == 'undefined') { |
| dev = mode; |
| mode = 0o666; |
| } |
| mode |= 8192; |
| return FS.mknod(path, mode, dev); |
| }, |
| symlink(oldpath, newpath) { |
| if (!PATH_FS.resolve(oldpath)) { |
| throw new FS.ErrnoError(44); |
| } |
| var lookup = FS.lookupPath(newpath, { parent: true }); |
| var parent = lookup.node; |
| if (!parent) { |
| throw new FS.ErrnoError(44); |
| } |
| var newname = PATH.basename(newpath); |
| var errCode = FS.mayCreate(parent, newname); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| if (!parent.node_ops.symlink) { |
| throw new FS.ErrnoError(63); |
| } |
| return parent.node_ops.symlink(parent, newname, oldpath); |
| }, |
| rename(old_path, new_path) { |
| var old_dirname = PATH.dirname(old_path); |
| var new_dirname = PATH.dirname(new_path); |
| var old_name = PATH.basename(old_path); |
| var new_name = PATH.basename(new_path); |
| // parents must exist |
| var lookup, old_dir, new_dir; |
| |
| // let the errors from non existent directories percolate up |
| lookup = FS.lookupPath(old_path, { parent: true }); |
| old_dir = lookup.node; |
| lookup = FS.lookupPath(new_path, { parent: true }); |
| new_dir = lookup.node; |
| |
| if (!old_dir || !new_dir) throw new FS.ErrnoError(44); |
| // need to be part of the same mount |
| if (old_dir.mount !== new_dir.mount) { |
| throw new FS.ErrnoError(75); |
| } |
| // source must exist |
| var old_node = FS.lookupNode(old_dir, old_name); |
| // old path should not be an ancestor of the new path |
| var relative = PATH_FS.relative(old_path, new_dirname); |
| if (relative.charAt(0) !== '.') { |
| throw new FS.ErrnoError(28); |
| } |
| // new path should not be an ancestor of the old path |
| relative = PATH_FS.relative(new_path, old_dirname); |
| if (relative.charAt(0) !== '.') { |
| throw new FS.ErrnoError(55); |
| } |
| // see if the new path already exists |
| var new_node; |
| try { |
| new_node = FS.lookupNode(new_dir, new_name); |
| } catch (e) { |
| // not fatal |
| } |
| // early out if nothing needs to change |
| if (old_node === new_node) { |
| return; |
| } |
| // we'll need to delete the old entry |
| var isdir = FS.isDir(old_node.mode); |
| var errCode = FS.mayDelete(old_dir, old_name, isdir); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| // need delete permissions if we'll be overwriting. |
| // need create permissions if new doesn't already exist. |
| errCode = new_node ? |
| FS.mayDelete(new_dir, new_name, isdir) : |
| FS.mayCreate(new_dir, new_name); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| if (!old_dir.node_ops.rename) { |
| throw new FS.ErrnoError(63); |
| } |
| if (FS.isMountpoint(old_node) || (new_node && FS.isMountpoint(new_node))) { |
| throw new FS.ErrnoError(10); |
| } |
| // if we are going to change the parent, check write permissions |
| if (new_dir !== old_dir) { |
| errCode = FS.nodePermissions(old_dir, 'w'); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| } |
| // remove the node from the lookup hash |
| FS.hashRemoveNode(old_node); |
| // do the underlying fs rename |
| try { |
| old_dir.node_ops.rename(old_node, new_dir, new_name); |
| // update old node (we do this here to avoid each backend |
| // needing to) |
| old_node.parent = new_dir; |
| } catch (e) { |
| throw e; |
| } finally { |
| // add the node back to the hash (in case node_ops.rename |
| // changed its name) |
| FS.hashAddNode(old_node); |
| } |
| }, |
| rmdir(path) { |
| var lookup = FS.lookupPath(path, { parent: true }); |
| var parent = lookup.node; |
| var name = PATH.basename(path); |
| var node = FS.lookupNode(parent, name); |
| var errCode = FS.mayDelete(parent, name, true); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| if (!parent.node_ops.rmdir) { |
| throw new FS.ErrnoError(63); |
| } |
| if (FS.isMountpoint(node)) { |
| throw new FS.ErrnoError(10); |
| } |
| parent.node_ops.rmdir(parent, name); |
| FS.destroyNode(node); |
| }, |
| readdir(path) { |
| var lookup = FS.lookupPath(path, { follow: true }); |
| var node = lookup.node; |
| var readdir = FS.checkOpExists(node.node_ops.readdir, 54); |
| return readdir(node); |
| }, |
| unlink(path) { |
| var lookup = FS.lookupPath(path, { parent: true }); |
| var parent = lookup.node; |
| if (!parent) { |
| throw new FS.ErrnoError(44); |
| } |
| var name = PATH.basename(path); |
| var node = FS.lookupNode(parent, name); |
| var errCode = FS.mayDelete(parent, name, false); |
| if (errCode) { |
| // According to POSIX, we should map EISDIR to EPERM, but |
| // we instead do what Linux does (and we must, as we use |
| // the musl linux libc). |
| throw new FS.ErrnoError(errCode); |
| } |
| if (!parent.node_ops.unlink) { |
| throw new FS.ErrnoError(63); |
| } |
| if (FS.isMountpoint(node)) { |
| throw new FS.ErrnoError(10); |
| } |
| parent.node_ops.unlink(parent, name); |
| FS.destroyNode(node); |
| }, |
| readlink(path) { |
| var lookup = FS.lookupPath(path); |
| var link = lookup.node; |
| if (!link) { |
| throw new FS.ErrnoError(44); |
| } |
| if (!link.node_ops.readlink) { |
| throw new FS.ErrnoError(28); |
| } |
| return link.node_ops.readlink(link); |
| }, |
| stat(path, dontFollow) { |
| var lookup = FS.lookupPath(path, { follow: !dontFollow }); |
| var node = lookup.node; |
| var getattr = FS.checkOpExists(node.node_ops.getattr, 63); |
| return getattr(node); |
| }, |
| fstat(fd) { |
| var stream = FS.getStreamChecked(fd); |
| var node = stream.node; |
| var getattr = stream.stream_ops.getattr; |
| var arg = getattr ? stream : node; |
| getattr ??= node.node_ops.getattr; |
| FS.checkOpExists(getattr, 63); |
| return getattr(arg); |
| }, |
| lstat(path) { |
| return FS.stat(path, true); |
| }, |
| doChmod(stream, node, mode, dontFollow) { |
| FS.doSetAttr(stream, node, { |
| mode: (mode & 4095) | (node.mode & ~4095), |
| ctime: Date.now(), |
| dontFollow |
| }); |
| }, |
| chmod(path, mode, dontFollow) { |
| var node; |
| if (typeof path == 'string') { |
| var lookup = FS.lookupPath(path, { follow: !dontFollow }); |
| node = lookup.node; |
| } else { |
| node = path; |
| } |
| FS.doChmod(null, node, mode, dontFollow); |
| }, |
| lchmod(path, mode) { |
| FS.chmod(path, mode, true); |
| }, |
| fchmod(fd, mode) { |
| var stream = FS.getStreamChecked(fd); |
| FS.doChmod(stream, stream.node, mode, false); |
| }, |
| doChown(stream, node, dontFollow) { |
| FS.doSetAttr(stream, node, { |
| timestamp: Date.now(), |
| dontFollow |
| // we ignore the uid / gid for now |
| }); |
| }, |
| chown(path, uid, gid, dontFollow) { |
| var node; |
| if (typeof path == 'string') { |
| var lookup = FS.lookupPath(path, { follow: !dontFollow }); |
| node = lookup.node; |
| } else { |
| node = path; |
| } |
| FS.doChown(null, node, dontFollow); |
| }, |
| lchown(path, uid, gid) { |
| FS.chown(path, uid, gid, true); |
| }, |
| fchown(fd, uid, gid) { |
| var stream = FS.getStreamChecked(fd); |
| FS.doChown(stream, stream.node, false); |
| }, |
| doTruncate(stream, node, len) { |
| if (FS.isDir(node.mode)) { |
| throw new FS.ErrnoError(31); |
| } |
| if (!FS.isFile(node.mode)) { |
| throw new FS.ErrnoError(28); |
| } |
| var errCode = FS.nodePermissions(node, 'w'); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| FS.doSetAttr(stream, node, { |
| size: len, |
| timestamp: Date.now() |
| }); |
| }, |
| truncate(path, len) { |
| if (len < 0) { |
| throw new FS.ErrnoError(28); |
| } |
| var node; |
| if (typeof path == 'string') { |
| var lookup = FS.lookupPath(path, { follow: true }); |
| node = lookup.node; |
| } else { |
| node = path; |
| } |
| FS.doTruncate(null, node, len); |
| }, |
| ftruncate(fd, len) { |
| var stream = FS.getStreamChecked(fd); |
| if (len < 0 || (stream.flags & 2097155) === 0) { |
| throw new FS.ErrnoError(28); |
| } |
| FS.doTruncate(stream, stream.node, len); |
| }, |
| utime(path, atime, mtime) { |
| var lookup = FS.lookupPath(path, { follow: true }); |
| var node = lookup.node; |
| var setattr = FS.checkOpExists(node.node_ops.setattr, 63); |
| setattr(node, { |
| atime: atime, |
| mtime: mtime |
| }); |
| }, |
| open(path, flags, mode = 0o666) { |
| if (path === "") { |
| throw new FS.ErrnoError(44); |
| } |
| flags = typeof flags == 'string' ? FS_modeStringToFlags(flags) : flags; |
| if ((flags & 64)) { |
| mode = (mode & 4095) | 32768; |
| } else { |
| mode = 0; |
| } |
| var node; |
| var isDirPath; |
| if (typeof path == 'object') { |
| node = path; |
| } else { |
| isDirPath = path.endsWith("/"); |
| // noent_okay makes it so that if the final component of the path |
| // doesn't exist, lookupPath returns `node: undefined`. `path` will be |
| // updated to point to the target of all symlinks. |
| var lookup = FS.lookupPath(path, { |
| follow: !(flags & 131072), |
| noent_okay: true |
| }); |
| node = lookup.node; |
| path = lookup.path; |
| } |
| // perhaps we need to create the node |
| var created = false; |
| if ((flags & 64)) { |
| if (node) { |
| // if O_CREAT and O_EXCL are set, error out if the node already exists |
| if ((flags & 128)) { |
| throw new FS.ErrnoError(20); |
| } |
| } else if (isDirPath) { |
| throw new FS.ErrnoError(31); |
| } else { |
| // node doesn't exist, try to create it |
| // Ignore the permission bits here to ensure we can `open` this new |
| // file below. We use chmod below to apply the permissions once the |
| // file is open. |
| node = FS.mknod(path, mode | 0o777, 0); |
| created = true; |
| } |
| } |
| if (!node) { |
| throw new FS.ErrnoError(44); |
| } |
| // can't truncate a device |
| if (FS.isChrdev(node.mode)) { |
| flags &= ~512; |
| } |
| // if asked only for a directory, then this must be one |
| if ((flags & 65536) && !FS.isDir(node.mode)) { |
| throw new FS.ErrnoError(54); |
| } |
| // check permissions, if this is not a file we just created now (it is ok to |
| // create and write to a file with read-only permissions; it is read-only |
| // for later use) |
| if (!created) { |
| var errCode = FS.mayOpen(node, flags); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| } |
| // do truncation if necessary |
| if ((flags & 512) && !created) { |
| FS.truncate(node, 0); |
| } |
| // we've already handled these, don't pass down to the underlying vfs |
| flags &= ~(128 | 512 | 131072); |
| |
| // register the stream with the filesystem |
| var stream = FS.createStream({ |
| node, |
| path: FS.getPath(node), // we want the absolute path to the node |
| flags, |
| seekable: true, |
| position: 0, |
| stream_ops: node.stream_ops, |
| // used by the file family libc calls (fopen, fwrite, ferror, etc.) |
| ungotten: [], |
| error: false |
| }); |
| // call the new stream's open function |
| if (stream.stream_ops.open) { |
| stream.stream_ops.open(stream); |
| } |
| if (created) { |
| FS.chmod(node, mode & 0o777); |
| } |
| if (Module['logReadFiles'] && !(flags & 1)) { |
| if (!(path in FS.readFiles)) { |
| FS.readFiles[path] = 1; |
| } |
| } |
| return stream; |
| }, |
| close(stream) { |
| if (FS.isClosed(stream)) { |
| throw new FS.ErrnoError(8); |
| } |
| if (stream.getdents) stream.getdents = null; // free readdir state |
| try { |
| if (stream.stream_ops.close) { |
| stream.stream_ops.close(stream); |
| } |
| } catch (e) { |
| throw e; |
| } finally { |
| FS.closeStream(stream.fd); |
| } |
| stream.fd = null; |
| }, |
| isClosed(stream) { |
| return stream.fd === null; |
| }, |
| llseek(stream, offset, whence) { |
| if (FS.isClosed(stream)) { |
| throw new FS.ErrnoError(8); |
| } |
| if (!stream.seekable || !stream.stream_ops.llseek) { |
| throw new FS.ErrnoError(70); |
| } |
| if (whence != 0 && whence != 1 && whence != 2) { |
| throw new FS.ErrnoError(28); |
| } |
| stream.position = stream.stream_ops.llseek(stream, offset, whence); |
| stream.ungotten = []; |
| return stream.position; |
| }, |
| read(stream, buffer, offset, length, position) { |
| if (length < 0 || position < 0) { |
| throw new FS.ErrnoError(28); |
| } |
| if (FS.isClosed(stream)) { |
| throw new FS.ErrnoError(8); |
| } |
| if ((stream.flags & 2097155) === 1) { |
| throw new FS.ErrnoError(8); |
| } |
| if (FS.isDir(stream.node.mode)) { |
| throw new FS.ErrnoError(31); |
| } |
| if (!stream.stream_ops.read) { |
| throw new FS.ErrnoError(28); |
| } |
| var seeking = typeof position != 'undefined'; |
| if (!seeking) { |
| position = stream.position; |
| } else if (!stream.seekable) { |
| throw new FS.ErrnoError(70); |
| } |
| var bytesRead = stream.stream_ops.read(stream, buffer, offset, length, position); |
| if (!seeking) stream.position += bytesRead; |
| return bytesRead; |
| }, |
| write(stream, buffer, offset, length, position, canOwn) { |
| if (length < 0 || position < 0) { |
| throw new FS.ErrnoError(28); |
| } |
| if (FS.isClosed(stream)) { |
| throw new FS.ErrnoError(8); |
| } |
| if ((stream.flags & 2097155) === 0) { |
| throw new FS.ErrnoError(8); |
| } |
| if (FS.isDir(stream.node.mode)) { |
| throw new FS.ErrnoError(31); |
| } |
| if (!stream.stream_ops.write) { |
| throw new FS.ErrnoError(28); |
| } |
| if (stream.seekable && stream.flags & 1024) { |
| // seek to the end before writing in append mode |
| FS.llseek(stream, 0, 2); |
| } |
| var seeking = typeof position != 'undefined'; |
| if (!seeking) { |
| position = stream.position; |
| } else if (!stream.seekable) { |
| throw new FS.ErrnoError(70); |
| } |
| var bytesWritten = stream.stream_ops.write(stream, buffer, offset, length, position, canOwn); |
| if (!seeking) stream.position += bytesWritten; |
| return bytesWritten; |
| }, |
| mmap(stream, length, position, prot, flags) { |
| // The caller requests write access to the mapping (prot & PROT_WRITE != 0). |
| // Check that we have permission to write to the file unless the MAP_PRIVATE |
| // flag is set: per the POSIX spec it is possible to write to a file opened |
| // in read-only mode with MAP_PRIVATE, as all modifications are visible only |
| // in the memory of the current process. |
| if ((prot & 2) !== 0 |
| && (flags & 2) === 0 |
| && (stream.flags & 2097155) !== 2) { |
| throw new FS.ErrnoError(2); |
| } |
| if ((stream.flags & 2097155) === 1) { |
| throw new FS.ErrnoError(2); |
| } |
| if (!stream.stream_ops.mmap) { |
| throw new FS.ErrnoError(43); |
| } |
| if (!length) { |
| throw new FS.ErrnoError(28); |
| } |
| return stream.stream_ops.mmap(stream, length, position, prot, flags); |
| }, |
| msync(stream, buffer, offset, length, mmapFlags) { |
| if (!stream.stream_ops.msync) { |
| return 0; |
| } |
| return stream.stream_ops.msync(stream, buffer, offset, length, mmapFlags); |
| }, |
| ioctl(stream, cmd, arg) { |
| if (!stream.stream_ops.ioctl) { |
| throw new FS.ErrnoError(59); |
| } |
| return stream.stream_ops.ioctl(stream, cmd, arg); |
| }, |
| readFile(path, opts = {}) { |
| opts.flags = opts.flags || 0; |
| opts.encoding = opts.encoding || 'binary'; |
| if (opts.encoding !== 'utf8' && opts.encoding !== 'binary') { |
| throw new Error(`Invalid encoding type "${opts.encoding}"`); |
| } |
| var stream = FS.open(path, opts.flags); |
| var stat = FS.stat(path); |
| var length = stat.size; |
| var buf = new Uint8Array(length); |
| FS.read(stream, buf, 0, length, 0); |
| if (opts.encoding === 'utf8') { |
| buf = UTF8ArrayToString(buf); |
| } |
| FS.close(stream); |
| return buf; |
| }, |
| writeFile(path, data, opts = {}) { |
| opts.flags = opts.flags || 577; |
| var stream = FS.open(path, opts.flags, opts.mode); |
| if (typeof data == 'string') { |
| data = new Uint8Array(intArrayFromString(data, true)); |
| } |
| if (ArrayBuffer.isView(data)) { |
| FS.write(stream, data, 0, data.byteLength, undefined, opts.canOwn); |
| } else { |
| throw new Error('Unsupported data type'); |
| } |
| FS.close(stream); |
| }, |
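| // Illustrative use of readFile/writeFile (hypothetical path): |
| //   FS.writeFile('/tmp/hello.txt', 'hello');                           // create + write |
| //   var text  = FS.readFile('/tmp/hello.txt', { encoding: 'utf8' });   // JS string |
| //   var bytes = FS.readFile('/tmp/hello.txt');                         // Uint8Array ('binary' default) |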
| cwd:() => FS.currentPath, |
| chdir(path) { |
| var lookup = FS.lookupPath(path, { follow: true }); |
| if (lookup.node === null) { |
| throw new FS.ErrnoError(44); |
| } |
| if (!FS.isDir(lookup.node.mode)) { |
| throw new FS.ErrnoError(54); |
| } |
| var errCode = FS.nodePermissions(lookup.node, 'x'); |
| if (errCode) { |
| throw new FS.ErrnoError(errCode); |
| } |
| FS.currentPath = lookup.path; |
| }, |
| createDefaultDirectories() { |
| FS.mkdir('/tmp'); |
| FS.mkdir('/home'); |
| FS.mkdir('/home/web_user'); |
| }, |
| createDefaultDevices() { |
| // create /dev |
| FS.mkdir('/dev'); |
| // setup /dev/null |
| FS.registerDevice(FS.makedev(1, 3), { |
| read: () => 0, |
| write: (stream, buffer, offset, length, pos) => length, |
| llseek: () => 0, |
| }); |
| FS.mkdev('/dev/null', FS.makedev(1, 3)); |
| // setup /dev/tty and /dev/tty1 |
| // stderr needs to print output using err() rather than out() |
| // so we register a second tty just for it. |
| TTY.register(FS.makedev(5, 0), TTY.default_tty_ops); |
| TTY.register(FS.makedev(6, 0), TTY.default_tty1_ops); |
| FS.mkdev('/dev/tty', FS.makedev(5, 0)); |
| FS.mkdev('/dev/tty1', FS.makedev(6, 0)); |
| // setup /dev/[u]random |
| // use a buffer to avoid overhead of individual crypto calls per byte |
| var randomBuffer = new Uint8Array(1024), randomLeft = 0; |
| var randomByte = () => { |
| if (randomLeft === 0) { |
| randomFill(randomBuffer); |
| randomLeft = randomBuffer.byteLength; |
| } |
| return randomBuffer[--randomLeft]; |
| }; |
| FS.createDevice('/dev', 'random', randomByte); |
| FS.createDevice('/dev', 'urandom', randomByte); |
| // we're not going to emulate the actual shm device, |
| // just create the tmp dirs that commonly reside in it |
| FS.mkdir('/dev/shm'); |
| FS.mkdir('/dev/shm/tmp'); |
| }, |
| createSpecialDirectories() { |
| // create /proc/self/fd which allows /proc/self/fd/6 => readlink gives the |
| // name of the stream for fd 6 (see test_unistd_ttyname) |
| FS.mkdir('/proc'); |
| var proc_self = FS.mkdir('/proc/self'); |
| FS.mkdir('/proc/self/fd'); |
| FS.mount({ |
| mount() { |
| var node = FS.createNode(proc_self, 'fd', 16895, 73); |
| node.stream_ops = { |
| llseek: MEMFS.stream_ops.llseek, |
| }; |
| node.node_ops = { |
| lookup(parent, name) { |
| var fd = +name; |
| var stream = FS.getStreamChecked(fd); |
| var ret = { |
| parent: null, |
| mount: { mountpoint: 'fake' }, |
| node_ops: { readlink: () => stream.path }, |
| id: fd + 1, |
| }; |
| ret.parent = ret; // make it look like a simple root node |
| return ret; |
| }, |
| readdir() { |
| return Array.from(FS.streams.entries()) |
| .filter(([k, v]) => v) |
| .map(([k, v]) => k.toString()); |
| } |
| }; |
| return node; |
| } |
| }, {}, '/proc/self/fd'); |
| }, |
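| // Illustrative consequence: once streams exist, /proc/self/fd/<n> resolves |
| // via readlink to the path of file descriptor n, e.g. |
| //   FS.readlink('/proc/self/fd/0');   // typically '/dev/tty' after init() |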
| createStandardStreams(input, output, error) { |
| // TODO: deprecate the old functionality of a single input / output callback |
| // that utilizes FS.createDevice, and instead require a unique set of |
| // stream ops |
| |
| // by default, we symlink the standard streams to the |
| // default tty devices. however, if the standard streams |
| // have been overwritten we create a unique device for |
| // them instead. |
| if (input) { |
| FS.createDevice('/dev', 'stdin', input); |
| } else { |
| FS.symlink('/dev/tty', '/dev/stdin'); |
| } |
| if (output) { |
| FS.createDevice('/dev', 'stdout', null, output); |
| } else { |
| FS.symlink('/dev/tty', '/dev/stdout'); |
| } |
| if (error) { |
| FS.createDevice('/dev', 'stderr', null, error); |
| } else { |
| FS.symlink('/dev/tty1', '/dev/stderr'); |
| } |
| |
| // open default streams for the stdin, stdout and stderr devices |
| var stdin = FS.open('/dev/stdin', 0); |
| var stdout = FS.open('/dev/stdout', 1); |
| var stderr = FS.open('/dev/stderr', 1); |
| }, |
| staticInit() { |
| FS.nameTable = new Array(4096); |
| |
| FS.mount(MEMFS, {}, '/'); |
| |
| FS.createDefaultDirectories(); |
| FS.createDefaultDevices(); |
| FS.createSpecialDirectories(); |
| |
| FS.filesystems = { |
| 'MEMFS': MEMFS, |
| }; |
| }, |
| init(input, output, error) { |
| FS.initialized = true; |
| |
| // Allow Module.stdin etc. to provide defaults, if none explicitly passed to us here |
| input ??= Module['stdin']; |
| output ??= Module['stdout']; |
| error ??= Module['stderr']; |
| |
| FS.createStandardStreams(input, output, error); |
| }, |
| quit() { |
| FS.initialized = false; |
| // force-flush all streams, so we get musl std streams printed out |
| // close all of our streams |
| for (var stream of FS.streams) { |
| if (stream) { |
| FS.close(stream); |
| } |
| } |
| }, |
| findObject(path, dontResolveLastLink) { |
| var ret = FS.analyzePath(path, dontResolveLastLink); |
| if (!ret.exists) { |
| return null; |
| } |
| return ret.object; |
| }, |
| analyzePath(path, dontResolveLastLink) { |
| // operate from within the context of the symlink's target |
| try { |
| var lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); |
| path = lookup.path; |
| } catch (e) { |
| } |
| var ret = { |
| isRoot: false, exists: false, error: 0, name: null, path: null, object: null, |
| parentExists: false, parentPath: null, parentObject: null |
| }; |
| try { |
| var lookup = FS.lookupPath(path, { parent: true }); |
| ret.parentExists = true; |
| ret.parentPath = lookup.path; |
| ret.parentObject = lookup.node; |
| ret.name = PATH.basename(path); |
| lookup = FS.lookupPath(path, { follow: !dontResolveLastLink }); |
| ret.exists = true; |
| ret.path = lookup.path; |
| ret.object = lookup.node; |
| ret.name = lookup.node.name; |
| ret.isRoot = lookup.path === '/'; |
| } catch (e) { |
| ret.error = e.errno; |
| }; |
| return ret; |
| }, |
| createPath(parent, path, canRead, canWrite) { |
| parent = typeof parent == 'string' ? parent : FS.getPath(parent); |
| var parts = path.split('/').reverse(); |
| while (parts.length) { |
| var part = parts.pop(); |
| if (!part) continue; |
| var current = PATH.join2(parent, part); |
| try { |
| FS.mkdir(current); |
| } catch (e) { |
| if (e.errno != 20) throw e; |
| } |
| parent = current; |
| } |
| return current; |
| }, |
| createFile(parent, name, properties, canRead, canWrite) { |
| var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); |
| var mode = FS_getMode(canRead, canWrite); |
| return FS.create(path, mode); |
| }, |
| createDataFile(parent, name, data, canRead, canWrite, canOwn) { |
| var path = name; |
| if (parent) { |
| parent = typeof parent == 'string' ? parent : FS.getPath(parent); |
| path = name ? PATH.join2(parent, name) : parent; |
| } |
| var mode = FS_getMode(canRead, canWrite); |
| var node = FS.create(path, mode); |
| if (data) { |
| if (typeof data == 'string') { |
| var arr = new Array(data.length); |
| for (var i = 0, len = data.length; i < len; ++i) arr[i] = data.charCodeAt(i); |
| data = arr; |
| } |
| // make sure we can write to the file |
| FS.chmod(node, mode | 146); |
| var stream = FS.open(node, 577); |
| FS.write(stream, data, 0, data.length, 0, canOwn); |
| FS.close(stream); |
| FS.chmod(node, mode); |
| } |
| }, |
| createDevice(parent, name, input, output) { |
| var path = PATH.join2(typeof parent == 'string' ? parent : FS.getPath(parent), name); |
| var mode = FS_getMode(!!input, !!output); |
| FS.createDevice.major ??= 64; |
| var dev = FS.makedev(FS.createDevice.major++, 0); |
| // Create a fake device with a set of stream ops to emulate |
| // the old behavior. |
| FS.registerDevice(dev, { |
| open(stream) { |
| stream.seekable = false; |
| }, |
| close(stream) { |
| // flush any pending line data |
| if (output?.buffer?.length) { |
| output(10); |
| } |
| }, |
| read(stream, buffer, offset, length, pos /* ignored */) { |
| var bytesRead = 0; |
| for (var i = 0; i < length; i++) { |
| var result; |
| try { |
| result = input(); |
| } catch (e) { |
| throw new FS.ErrnoError(29); |
| } |
| if (result === undefined && bytesRead === 0) { |
| throw new FS.ErrnoError(6); |
| } |
| if (result === null || result === undefined) break; |
| bytesRead++; |
| buffer[offset+i] = result; |
| } |
| if (bytesRead) { |
| stream.node.atime = Date.now(); |
| } |
| return bytesRead; |
| }, |
| write(stream, buffer, offset, length, pos) { |
| for (var i = 0; i < length; i++) { |
| try { |
| output(buffer[offset+i]); |
| } catch (e) { |
| throw new FS.ErrnoError(29); |
| } |
| } |
| if (length) { |
| stream.node.mtime = stream.node.ctime = Date.now(); |
| } |
| return i; |
| } |
| }); |
| return FS.mkdev(path, mode, dev); |
| }, |
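| // Illustrative use of createDevice (hypothetical callbacks): a byte-oriented |
| // device where `input` returns the next byte (null/undefined signals EOF) and |
| // `output` receives one byte per call. |
| //   FS.createDevice('/dev', 'logger', null, (byte) => console.log(byte)); |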
| forceLoadFile(obj) { |
| if (obj.isDevice || obj.isFolder || obj.link || obj.contents) return true; |
| if (typeof XMLHttpRequest != 'undefined') { |
| throw new Error("Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread."); |
| } else { // Command-line. |
| try { |
| obj.contents = readBinary(obj.url); |
| obj.usedBytes = obj.contents.length; |
| } catch (e) { |
| throw new FS.ErrnoError(29); |
| } |
| } |
| }, |
| createLazyFile(parent, name, url, canRead, canWrite) { |
| // Lazy chunked Uint8Array (implements get and length from Uint8Array). |
| // Actual getting is abstracted away for eventual reuse. |
| class LazyUint8Array { |
| lengthKnown = false; |
| chunks = []; // Loaded chunks. Index is the chunk number |
| get(idx) { |
| if (idx > this.length-1 || idx < 0) { |
| return undefined; |
| } |
| var chunkOffset = idx % this.chunkSize; |
| var chunkNum = (idx / this.chunkSize)|0; |
| return this.getter(chunkNum)[chunkOffset]; |
| } |
| setDataGetter(getter) { |
| this.getter = getter; |
| } |
| cacheLength() { |
| // Find length |
| var xhr = new XMLHttpRequest(); |
| xhr.open('HEAD', url, false); |
| xhr.send(null); |
| if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status); |
| var datalength = Number(xhr.getResponseHeader("Content-length")); |
| var header; |
| var hasByteServing = (header = xhr.getResponseHeader("Accept-Ranges")) && header === "bytes"; |
| var usesGzip = (header = xhr.getResponseHeader("Content-Encoding")) && header === "gzip"; |
| |
| var chunkSize = 1024*1024; // Chunk size in bytes |
| |
| if (!hasByteServing) chunkSize = datalength; |
| |
| // Function to get a range from the remote URL. |
| var doXHR = (from, to) => { |
| if (from > to) throw new Error("invalid range (" + from + ", " + to + ") or no bytes requested!"); |
| if (to > datalength-1) throw new Error("only " + datalength + " bytes available! programmer error!"); |
| |
| // TODO: Use mozResponseArrayBuffer, responseStream, etc. if available. |
| var xhr = new XMLHttpRequest(); |
| xhr.open('GET', url, false); |
| if (datalength !== chunkSize) xhr.setRequestHeader("Range", "bytes=" + from + "-" + to); |
| |
| // Some hints to the browser that we want binary data. |
| xhr.responseType = 'arraybuffer'; |
| if (xhr.overrideMimeType) { |
| xhr.overrideMimeType('text/plain; charset=x-user-defined'); |
| } |
| |
| xhr.send(null); |
| if (!(xhr.status >= 200 && xhr.status < 300 || xhr.status === 304)) throw new Error("Couldn't load " + url + ". Status: " + xhr.status); |
| if (xhr.response !== undefined) { |
| return new Uint8Array(/** @type{Array<number>} */(xhr.response || [])); |
| } |
| return intArrayFromString(xhr.responseText || '', true); |
| }; |
| var lazyArray = this; |
| lazyArray.setDataGetter((chunkNum) => { |
| var start = chunkNum * chunkSize; |
| var end = (chunkNum+1) * chunkSize - 1; // including this byte |
| end = Math.min(end, datalength-1); // if datalength-1 is selected, this is the last block |
| if (typeof lazyArray.chunks[chunkNum] == 'undefined') { |
| lazyArray.chunks[chunkNum] = doXHR(start, end); |
| } |
| if (typeof lazyArray.chunks[chunkNum] == 'undefined') throw new Error('doXHR failed!'); |
| return lazyArray.chunks[chunkNum]; |
| }); |
| |
| if (usesGzip || !datalength) { |
| // if the server uses gzip or doesn't supply the length, we have to download the whole file to get the (uncompressed) length |
| chunkSize = datalength = 1; // this will force getter(0)/doXHR to download the whole file |
| datalength = this.getter(0).length; |
| chunkSize = datalength; |
| out("LazyFiles on gzip forces download of the whole file when length is accessed"); |
| } |
| |
| this._length = datalength; |
| this._chunkSize = chunkSize; |
| this.lengthKnown = true; |
| } |
| get length() { |
| if (!this.lengthKnown) { |
| this.cacheLength(); |
| } |
| return this._length; |
| } |
| get chunkSize() { |
| if (!this.lengthKnown) { |
| this.cacheLength(); |
| } |
| return this._chunkSize; |
| } |
| } |
| |
| if (typeof XMLHttpRequest != 'undefined') { |
| if (!ENVIRONMENT_IS_WORKER) throw 'Cannot do synchronous binary XHRs outside webworkers in modern browsers. Use --embed-file or --preload-file in emcc'; |
| var lazyArray = new LazyUint8Array(); |
| var properties = { isDevice: false, contents: lazyArray }; |
| } else { |
| var properties = { isDevice: false, url: url }; |
| } |
| |
| var node = FS.createFile(parent, name, properties, canRead, canWrite); |
| // This is a total hack, but I want to get this lazy file code out of the |
| // core of MEMFS. If we want to keep this lazy file concept I feel it should |
| // be its own thin LAZYFS proxying calls to MEMFS. |
| if (properties.contents) { |
| node.contents = properties.contents; |
| } else if (properties.url) { |
| node.contents = null; |
| node.url = properties.url; |
| } |
| // Add a function that defers querying the file size until it is asked the first time. |
| Object.defineProperties(node, { |
| usedBytes: { |
| get: function() { return this.contents.length; } |
| } |
| }); |
| // override each stream op with one that tries to force load the lazy file first |
| var stream_ops = {}; |
| var keys = Object.keys(node.stream_ops); |
| keys.forEach((key) => { |
| var fn = node.stream_ops[key]; |
| stream_ops[key] = (...args) => { |
| FS.forceLoadFile(node); |
| return fn(...args); |
| }; |
| }); |
| function writeChunks(stream, buffer, offset, length, position) { |
| var contents = stream.node.contents; |
| if (position >= contents.length) |
| return 0; |
| var size = Math.min(contents.length - position, length); |
| if (contents.slice) { // normal array |
| for (var i = 0; i < size; i++) { |
| buffer[offset + i] = contents[position + i]; |
| } |
| } else { |
| for (var i = 0; i < size; i++) { // LazyUint8Array from sync binary XHR |
| buffer[offset + i] = contents.get(position + i); |
| } |
| } |
| return size; |
| } |
| // use a custom read function |
| stream_ops.read = (stream, buffer, offset, length, position) => { |
| FS.forceLoadFile(node); |
| return writeChunks(stream, buffer, offset, length, position) |
| }; |
| // use a custom mmap function |
| stream_ops.mmap = (stream, length, position, prot, flags) => { |
| FS.forceLoadFile(node); |
| var ptr = mmapAlloc(length); |
| if (!ptr) { |
| throw new FS.ErrnoError(48); |
| } |
| writeChunks(stream, HEAP8, ptr, length, position); |
| return { ptr, allocated: true }; |
| }; |
| node.stream_ops = stream_ops; |
| return node; |
| }, |
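| // Illustrative use of createLazyFile (hypothetical URL): contents are fetched |
| // in 1 MiB chunks on first access; the synchronous XHR path only works inside |
| // a web worker (on the command line forceLoadFile falls back to readBinary). |
| //   FS.createLazyFile('/', 'big.dat', 'https://example.com/big.dat', true, false); |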
| }; |
| |
| |
| /** |
| * Given a pointer 'ptr' to a null-terminated UTF8-encoded string in the |
| * emscripten HEAP, returns a copy of that string as a Javascript String object. |
| * |
| * @param {number} ptr |
| * @param {number=} maxBytesToRead - An optional length that specifies the |
| * maximum number of bytes to read. You can omit this parameter to scan the |
| * string until the first 0 byte. If maxBytesToRead is passed, and the string |
| * at [ptr, ptr+maxBytesToRead[ contains a null byte in the middle, then the |
| * string will be cut short at that byte index (i.e. maxBytesToRead will not |
| * produce a string of exact length [ptr, ptr+maxBytesToRead[). N.B. mixing |
| * frequent uses of UTF8ToString() with and without maxBytesToRead may throw |
| * JS JIT optimizations off, so it is worth considering consistently using |
| * one style or the other. |
| * @return {string} |
| */ |
| var UTF8ToString = (ptr, maxBytesToRead) => { |
| return ptr ? UTF8ArrayToString(HEAPU8, ptr, maxBytesToRead) : ''; |
| }; |
| var SYSCALLS = { |
| DEFAULT_POLLMASK:5, |
| calculateAt(dirfd, path, allowEmpty) { |
| if (PATH.isAbs(path)) { |
| return path; |
| } |
| // relative path |
| var dir; |
| if (dirfd === -100) { |
| dir = FS.cwd(); |
| } else { |
| var dirstream = SYSCALLS.getStreamFromFD(dirfd); |
| dir = dirstream.path; |
| } |
| if (path.length == 0) { |
| if (!allowEmpty) { |
| throw new FS.ErrnoError(44); |
| } |
| return dir; |
| } |
| return dir + '/' + path; |
| }, |
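| // Illustrative: dirfd -100 is AT_FDCWD, so a relative path resolves against |
| // the current working directory, e.g. |
| //   SYSCALLS.calculateAt(-100, 'notes.txt')   // => FS.cwd() + '/notes.txt' |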
| writeStat(buf, stat) { |
| HEAP32[((buf)>>2)] = stat.dev; |
| HEAP32[(((buf)+(4))>>2)] = stat.mode; |
| HEAPU32[(((buf)+(8))>>2)] = stat.nlink; |
| HEAP32[(((buf)+(12))>>2)] = stat.uid; |
| HEAP32[(((buf)+(16))>>2)] = stat.gid; |
| HEAP32[(((buf)+(20))>>2)] = stat.rdev; |
| HEAP64[(((buf)+(24))>>3)] = BigInt(stat.size); |
| HEAP32[(((buf)+(32))>>2)] = 4096; |
| HEAP32[(((buf)+(36))>>2)] = stat.blocks; |
| var atime = stat.atime.getTime(); |
| var mtime = stat.mtime.getTime(); |
| var ctime = stat.ctime.getTime(); |
| HEAP64[(((buf)+(40))>>3)] = BigInt(Math.floor(atime / 1000)); |
| HEAPU32[(((buf)+(48))>>2)] = (atime % 1000) * 1000 * 1000; |
| HEAP64[(((buf)+(56))>>3)] = BigInt(Math.floor(mtime / 1000)); |
| HEAPU32[(((buf)+(64))>>2)] = (mtime % 1000) * 1000 * 1000; |
| HEAP64[(((buf)+(72))>>3)] = BigInt(Math.floor(ctime / 1000)); |
| HEAPU32[(((buf)+(80))>>2)] = (ctime % 1000) * 1000 * 1000; |
| HEAP64[(((buf)+(88))>>3)] = BigInt(stat.ino); |
| return 0; |
| }, |
| writeStatFs(buf, stats) { |
| HEAP32[(((buf)+(4))>>2)] = stats.bsize; |
| HEAP32[(((buf)+(40))>>2)] = stats.bsize; |
| HEAP32[(((buf)+(8))>>2)] = stats.blocks; |
| HEAP32[(((buf)+(12))>>2)] = stats.bfree; |
| HEAP32[(((buf)+(16))>>2)] = stats.bavail; |
| HEAP32[(((buf)+(20))>>2)] = stats.files; |
| HEAP32[(((buf)+(24))>>2)] = stats.ffree; |
| HEAP32[(((buf)+(28))>>2)] = stats.fsid; |
| HEAP32[(((buf)+(44))>>2)] = stats.flags; // ST_NOSUID |
| HEAP32[(((buf)+(36))>>2)] = stats.namelen; |
| }, |
| doMsync(addr, stream, len, flags, offset) { |
| if (!FS.isFile(stream.node.mode)) { |
| throw new FS.ErrnoError(43); |
| } |
| if (flags & 2) { |
| // MAP_PRIVATE calls need not be synced back to the underlying fs |
| return 0; |
| } |
| var buffer = HEAPU8.slice(addr, addr + len); |
| FS.msync(stream, buffer, offset, len, flags); |
| }, |
| getStreamFromFD(fd) { |
| var stream = FS.getStreamChecked(fd); |
| return stream; |
| }, |
| varargs:undefined, |
| getStr(ptr) { |
| var ret = UTF8ToString(ptr); |
| return ret; |
| }, |
| }; |
| function ___syscall_fcntl64(fd, cmd, varargs) { |
| SYSCALLS.varargs = varargs; |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| switch (cmd) { |
| case 0: { |
| var arg = syscallGetVarargI(); |
| if (arg < 0) { |
| return -28; |
| } |
| while (FS.streams[arg]) { |
| arg++; |
| } |
| var newStream; |
| newStream = FS.dupStream(stream, arg); |
| return newStream.fd; |
| } |
| case 1: |
| case 2: |
| return 0; // FD_CLOEXEC makes no sense for a single process. |
| case 3: |
| return stream.flags; |
| case 4: { |
| var arg = syscallGetVarargI(); |
| stream.flags |= arg; |
| return 0; |
| } |
| case 12: { |
| var arg = syscallGetVarargP(); |
| var offset = 0; |
| // We're always unlocked. |
| HEAP16[(((arg)+(offset))>>1)] = 2; |
| return 0; |
| } |
| case 13: |
| case 14: |
| // Pretend that the locking is successful. These are process-level locks, |
| // and Emscripten programs are a single process. If we supported linking a |
| // filesystem between programs, we'd need to do more here. |
| // See https://github.com/emscripten-core/emscripten/issues/23697 |
| return 0; |
| } |
| return -28; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| function ___syscall_fstat64(fd, buf) { |
| try { |
| |
| return SYSCALLS.writeStat(buf, FS.fstat(fd)); |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| |
| function ___syscall_ioctl(fd, op, varargs) { |
| SYSCALLS.varargs = varargs; |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| switch (op) { |
| case 21509: { |
| if (!stream.tty) return -59; |
| return 0; |
| } |
| case 21505: { |
| if (!stream.tty) return -59; |
| if (stream.tty.ops.ioctl_tcgets) { |
| var termios = stream.tty.ops.ioctl_tcgets(stream); |
| var argp = syscallGetVarargP(); |
| HEAP32[((argp)>>2)] = termios.c_iflag || 0; |
| HEAP32[(((argp)+(4))>>2)] = termios.c_oflag || 0; |
| HEAP32[(((argp)+(8))>>2)] = termios.c_cflag || 0; |
| HEAP32[(((argp)+(12))>>2)] = termios.c_lflag || 0; |
| for (var i = 0; i < 32; i++) { |
| HEAP8[(argp + i)+(17)] = termios.c_cc[i] || 0; |
| } |
| return 0; |
| } |
| return 0; |
| } |
| case 21510: |
| case 21511: |
| case 21512: { |
| if (!stream.tty) return -59; |
| return 0; // no-op, not actually adjusting terminal settings |
| } |
| case 21506: |
| case 21507: |
| case 21508: { |
| if (!stream.tty) return -59; |
| if (stream.tty.ops.ioctl_tcsets) { |
| var argp = syscallGetVarargP(); |
| var c_iflag = HEAP32[((argp)>>2)]; |
| var c_oflag = HEAP32[(((argp)+(4))>>2)]; |
| var c_cflag = HEAP32[(((argp)+(8))>>2)]; |
| var c_lflag = HEAP32[(((argp)+(12))>>2)]; |
| var c_cc = []; |
| for (var i = 0; i < 32; i++) { |
| c_cc.push(HEAP8[(argp + i)+(17)]); |
| } |
| return stream.tty.ops.ioctl_tcsets(stream.tty, op, { c_iflag, c_oflag, c_cflag, c_lflag, c_cc }); |
| } |
| return 0; // no-op, not actually adjusting terminal settings |
| } |
| case 21519: { |
| if (!stream.tty) return -59; |
| var argp = syscallGetVarargP(); |
| HEAP32[((argp)>>2)] = 0; |
| return 0; |
| } |
| case 21520: { |
| if (!stream.tty) return -59; |
| return -28; // not supported |
| } |
| case 21531: { |
| var argp = syscallGetVarargP(); |
| return FS.ioctl(stream, op, argp); |
| } |
| case 21523: { |
| // TODO: in theory we should write to the winsize struct that gets |
| // passed in, but for now musl doesn't read anything from it |
| if (!stream.tty) return -59; |
| if (stream.tty.ops.ioctl_tiocgwinsz) { |
| var winsize = stream.tty.ops.ioctl_tiocgwinsz(stream.tty); |
| var argp = syscallGetVarargP(); |
| HEAP16[((argp)>>1)] = winsize[0]; |
| HEAP16[(((argp)+(2))>>1)] = winsize[1]; |
| } |
| return 0; |
| } |
| case 21524: { |
| // TODO: technically, this ioctl call should change the window size. |
| // but, since emscripten doesn't have any concept of a terminal window |
| // yet, we'll just silently throw it away as we do for TIOCGWINSZ |
| if (!stream.tty) return -59; |
| return 0; |
| } |
| case 21515: { |
| if (!stream.tty) return -59; |
| return 0; |
| } |
| default: return -28; // not supported |
| } |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| function ___syscall_lstat64(path, buf) { |
| try { |
| |
| path = SYSCALLS.getStr(path); |
| return SYSCALLS.writeStat(buf, FS.lstat(path)); |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| function ___syscall_newfstatat(dirfd, path, buf, flags) { |
| try { |
| |
| path = SYSCALLS.getStr(path); |
| var nofollow = flags & 256; |
| var allowEmpty = flags & 4096; |
| flags = flags & (~6400); |
| path = SYSCALLS.calculateAt(dirfd, path, allowEmpty); |
| return SYSCALLS.writeStat(buf, nofollow ? FS.lstat(path) : FS.stat(path)); |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| |
| function ___syscall_openat(dirfd, path, flags, varargs) { |
| SYSCALLS.varargs = varargs; |
| try { |
| |
| path = SYSCALLS.getStr(path); |
| path = SYSCALLS.calculateAt(dirfd, path); |
| var mode = varargs ? syscallGetVarargI() : 0; |
| return FS.open(path, flags, mode).fd; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| function ___syscall_stat64(path, buf) { |
| try { |
| |
| path = SYSCALLS.getStr(path); |
| return SYSCALLS.writeStat(buf, FS.stat(path)); |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| var __abort_js = () => |
| abort(''); |
| |
| var structRegistrations = { |
| }; |
| |
| var runDestructors = (destructors) => { |
| while (destructors.length) { |
| var ptr = destructors.pop(); |
| var del = destructors.pop(); |
| del(ptr); |
| } |
| }; |
| |
| /** @suppress {globalThis} */ |
| function readPointer(pointer) { |
| return this['fromWireType'](HEAPU32[((pointer)>>2)]); |
| } |
| |
| var awaitingDependencies = { |
| }; |
| |
| var registeredTypes = { |
| }; |
| |
| var typeDependencies = { |
| }; |
| |
| var InternalError = class InternalError extends Error { constructor(message) { super(message); this.name = 'InternalError'; }}; |
| var throwInternalError = (message) => { throw new InternalError(message); }; |
| var whenDependentTypesAreResolved = (myTypes, dependentTypes, getTypeConverters) => { |
| myTypes.forEach((type) => typeDependencies[type] = dependentTypes); |
| |
| function onComplete(typeConverters) { |
| var myTypeConverters = getTypeConverters(typeConverters); |
| if (myTypeConverters.length !== myTypes.length) { |
| throwInternalError('Mismatched type converter count'); |
| } |
| for (var i = 0; i < myTypes.length; ++i) { |
| registerType(myTypes[i], myTypeConverters[i]); |
| } |
| } |
| |
| var typeConverters = new Array(dependentTypes.length); |
| var unregisteredTypes = []; |
| var registered = 0; |
| dependentTypes.forEach((dt, i) => { |
| if (registeredTypes.hasOwnProperty(dt)) { |
| typeConverters[i] = registeredTypes[dt]; |
| } else { |
| unregisteredTypes.push(dt); |
| if (!awaitingDependencies.hasOwnProperty(dt)) { |
| awaitingDependencies[dt] = []; |
| } |
| awaitingDependencies[dt].push(() => { |
| typeConverters[i] = registeredTypes[dt]; |
| ++registered; |
| if (registered === unregisteredTypes.length) { |
| onComplete(typeConverters); |
| } |
| }); |
| } |
| }); |
| if (0 === unregisteredTypes.length) { |
| onComplete(typeConverters); |
| } |
| }; |
| var __embind_finalize_value_object = (structType) => { |
| var reg = structRegistrations[structType]; |
| delete structRegistrations[structType]; |
| |
| var rawConstructor = reg.rawConstructor; |
| var rawDestructor = reg.rawDestructor; |
| var fieldRecords = reg.fields; |
| var fieldTypes = fieldRecords.map((field) => field.getterReturnType). |
| concat(fieldRecords.map((field) => field.setterArgumentType)); |
| whenDependentTypesAreResolved([structType], fieldTypes, (fieldTypes) => { |
| var fields = {}; |
| fieldRecords.forEach((field, i) => { |
| var fieldName = field.fieldName; |
| var getterReturnType = fieldTypes[i]; |
| var optional = fieldTypes[i].optional; |
| var getter = field.getter; |
| var getterContext = field.getterContext; |
| var setterArgumentType = fieldTypes[i + fieldRecords.length]; |
| var setter = field.setter; |
| var setterContext = field.setterContext; |
| fields[fieldName] = { |
| read: (ptr) => getterReturnType['fromWireType'](getter(getterContext, ptr)), |
| write: (ptr, o) => { |
| var destructors = []; |
| setter(setterContext, ptr, setterArgumentType['toWireType'](destructors, o)); |
| runDestructors(destructors); |
| }, |
| optional, |
| }; |
| }); |
| |
| return [{ |
| name: reg.name, |
| 'fromWireType': (ptr) => { |
| var rv = {}; |
| for (var i in fields) { |
| rv[i] = fields[i].read(ptr); |
| } |
| rawDestructor(ptr); |
| return rv; |
| }, |
| 'toWireType': (destructors, o) => { |
| // todo: Here we have an opportunity for -O3 level "unsafe" optimizations: |
| // assume all fields are present without checking. |
| for (var fieldName in fields) { |
| if (!(fieldName in o) && !fields[fieldName].optional) { |
| throw new TypeError(`Missing field: "${fieldName}"`); |
| } |
| } |
| var ptr = rawConstructor(); |
| for (fieldName in fields) { |
| fields[fieldName].write(ptr, o[fieldName]); |
| } |
| if (destructors !== null) { |
| destructors.push(rawDestructor, ptr); |
| } |
| return ptr; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': readPointer, |
| destructorFunction: rawDestructor, |
| }]; |
| }); |
| }; |
| |
| var AsciiToString = (ptr) => { |
| var str = ''; |
| while (1) { |
| var ch = HEAPU8[ptr++]; |
| if (!ch) return str; |
| str += String.fromCharCode(ch); |
| } |
| }; |
| |
| |
| |
| |
| var BindingError = class BindingError extends Error { constructor(message) { super(message); this.name = 'BindingError'; }}; |
| var throwBindingError = (message) => { throw new BindingError(message); }; |
| /** @param {Object=} options */ |
| function sharedRegisterType(rawType, registeredInstance, options = {}) { |
| var name = registeredInstance.name; |
| if (!rawType) { |
| throwBindingError(`type "${name}" must have a positive integer typeid pointer`); |
| } |
| if (registeredTypes.hasOwnProperty(rawType)) { |
| if (options.ignoreDuplicateRegistrations) { |
| return; |
| } else { |
| throwBindingError(`Cannot register type '${name}' twice`); |
| } |
| } |
| |
| registeredTypes[rawType] = registeredInstance; |
| delete typeDependencies[rawType]; |
| |
| if (awaitingDependencies.hasOwnProperty(rawType)) { |
| var callbacks = awaitingDependencies[rawType]; |
| delete awaitingDependencies[rawType]; |
| callbacks.forEach((cb) => cb()); |
| } |
| } |
| /** @param {Object=} options */ |
| function registerType(rawType, registeredInstance, options = {}) { |
| return sharedRegisterType(rawType, registeredInstance, options); |
| } |
| |
| var integerReadValueFromPointer = (name, width, signed) => { |
| // integers are quite common, so generate very specialized functions |
| switch (width) { |
| case 1: return signed ? |
| (pointer) => HEAP8[pointer] : |
| (pointer) => HEAPU8[pointer]; |
| case 2: return signed ? |
| (pointer) => HEAP16[((pointer)>>1)] : |
| (pointer) => HEAPU16[((pointer)>>1)] |
| case 4: return signed ? |
| (pointer) => HEAP32[((pointer)>>2)] : |
| (pointer) => HEAPU32[((pointer)>>2)] |
| case 8: return signed ? |
| (pointer) => HEAP64[((pointer)>>3)] : |
| (pointer) => HEAPU64[((pointer)>>3)] |
| default: |
| throw new TypeError(`invalid integer width (${width}): ${name}`); |
| } |
| }; |
| /** @suppress {globalThis} */ |
| var __embind_register_bigint = (primitiveType, name, size, minRange, maxRange) => { |
| name = AsciiToString(name); |
| |
| const isUnsignedType = minRange === 0n; |
| |
| let fromWireType = (value) => value; |
| if (isUnsignedType) { |
| // uint64 values get converted to int64 in the ABI; fix them up like we do for 32-bit integers. |
| const bitSize = size * 8; |
| fromWireType = (value) => { |
| return BigInt.asUintN(bitSize, value); |
| } |
| maxRange = fromWireType(maxRange); |
| } |
| |
| registerType(primitiveType, { |
| name, |
| 'fromWireType': fromWireType, |
| 'toWireType': (destructors, value) => { |
| if (typeof value == "number") { |
| value = BigInt(value); |
| } |
| return value; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': integerReadValueFromPointer(name, size, !isUnsignedType), |
| destructorFunction: null, // This type does not need a destructor |
| }); |
| }; |
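| // Worked example of the unsigned fix-up above: a u64 crosses the ABI as a |
| // signed BigInt, and BigInt.asUintN(64, -1n) recovers 18446744073709551615n |
| // (2^64 - 1). |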
| |
| |
| |
| var GenericWireTypeSize = 8; |
| /** @suppress {globalThis} */ |
| var __embind_register_bool = (rawType, name, trueValue, falseValue) => { |
| name = AsciiToString(name); |
| registerType(rawType, { |
| name, |
| 'fromWireType': function(wt) { |
| // ambiguous emscripten ABI: sometimes return values are |
| // true or false, and sometimes integers (0 or 1) |
| return !!wt; |
| }, |
| 'toWireType': function(destructors, o) { |
| return o ? trueValue : falseValue; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': function(pointer) { |
| return this['fromWireType'](HEAPU8[pointer]); |
| }, |
| destructorFunction: null, // This type does not need a destructor |
| }); |
| }; |
| |
| |
| |
| var shallowCopyInternalPointer = (o) => { |
| return { |
| count: o.count, |
| deleteScheduled: o.deleteScheduled, |
| preservePointerOnDelete: o.preservePointerOnDelete, |
| ptr: o.ptr, |
| ptrType: o.ptrType, |
| smartPtr: o.smartPtr, |
| smartPtrType: o.smartPtrType, |
| }; |
| }; |
| |
| var throwInstanceAlreadyDeleted = (obj) => { |
| function getInstanceTypeName(handle) { |
| return handle.$$.ptrType.registeredClass.name; |
| } |
| throwBindingError(getInstanceTypeName(obj) + ' instance already deleted'); |
| }; |
| |
| var finalizationRegistry = false; |
| |
| var detachFinalizer = (handle) => {}; |
| |
| var runDestructor = ($$) => { |
| if ($$.smartPtr) { |
| $$.smartPtrType.rawDestructor($$.smartPtr); |
| } else { |
| $$.ptrType.registeredClass.rawDestructor($$.ptr); |
| } |
| }; |
| var releaseClassHandle = ($$) => { |
| $$.count.value -= 1; |
| var toDelete = 0 === $$.count.value; |
| if (toDelete) { |
| runDestructor($$); |
| } |
| }; |
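| // attachFinalizer lazily rewrites itself on first use: if the environment has |
| // no FinalizationRegistry it becomes a no-op, otherwise it creates a registry |
| // whose callback drops the C++ reference count (releaseClassHandle) when a JS |
| // handle is garbage collected. Only handles that own a smart pointer are |
| // registered, so raw-pointer handles still rely on explicit delete(). |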
| var attachFinalizer = (handle) => { |
| if ('undefined' === typeof FinalizationRegistry) { |
| attachFinalizer = (handle) => handle; |
| return handle; |
| } |
| // If the running environment has a FinalizationRegistry (see |
| // https://github.com/tc39/proposal-weakrefs), then attach finalizers |
| // for class handles. We check for the presence of FinalizationRegistry |
| // at run-time, not build-time. |
| finalizationRegistry = new FinalizationRegistry((info) => { |
| releaseClassHandle(info.$$); |
| }); |
| attachFinalizer = (handle) => { |
| var $$ = handle.$$; |
| var hasSmartPtr = !!$$.smartPtr; |
| if (hasSmartPtr) { |
| // We should not call the destructor on raw pointers in case other code expects the pointee to live |
| var info = { $$: $$ }; |
| finalizationRegistry.register(handle, info, handle); |
| } |
| return handle; |
| }; |
| detachFinalizer = (handle) => finalizationRegistry.unregister(handle); |
| return attachFinalizer(handle); |
| }; |
| |
| |
| |
| |
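| // deleteLater() pushes handles onto this queue instead of destroying them |
| // immediately; flushPendingDeletes() then drains the queue and calls delete() |
| // on each entry. If a delayFunction has been installed, it is invoked when the |
| // first entry is queued so the embedder can schedule the flush (e.g. on the |
| // next tick). Illustrative sketch (`obj` is a hypothetical bound instance): |
| //   obj.deleteLater();        // defer destruction |
| //   flushPendingDeletes();    // destroy everything queued so far |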
| var deletionQueue = []; |
| var flushPendingDeletes = () => { |
| while (deletionQueue.length) { |
| var obj = deletionQueue.pop(); |
| obj.$$.deleteScheduled = false; |
| obj['delete'](); |
| } |
| }; |
| |
| var delayFunction; |
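| // init_ClassHandle populates the shared prototype for every bound C++ object: |
| // clone() bumps the shared reference count and returns a new JS wrapper, |
| // delete() drops the count (running the C++ destructor at zero), deleteLater() |
| // defers via the deletion queue above, and isAliasOf() compares two handles by |
| // upcasting both raw pointers to their base-most class. |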
| var init_ClassHandle = () => { |
| let proto = ClassHandle.prototype; |
| |
| Object.assign(proto, { |
| "isAliasOf"(other) { |
| if (!(this instanceof ClassHandle)) { |
| return false; |
| } |
| if (!(other instanceof ClassHandle)) { |
| return false; |
| } |
| |
| var leftClass = this.$$.ptrType.registeredClass; |
| var left = this.$$.ptr; |
| other.$$ = /** @type {Object} */ (other.$$); |
| var rightClass = other.$$.ptrType.registeredClass; |
| var right = other.$$.ptr; |
| |
| while (leftClass.baseClass) { |
| left = leftClass.upcast(left); |
| leftClass = leftClass.baseClass; |
| } |
| |
| while (rightClass.baseClass) { |
| right = rightClass.upcast(right); |
| rightClass = rightClass.baseClass; |
| } |
| |
| return leftClass === rightClass && left === right; |
| }, |
| |
| "clone"() { |
| if (!this.$$.ptr) { |
| throwInstanceAlreadyDeleted(this); |
| } |
| |
| if (this.$$.preservePointerOnDelete) { |
| this.$$.count.value += 1; |
| return this; |
| } else { |
| var clone = attachFinalizer(Object.create(Object.getPrototypeOf(this), { |
| $$: { |
| value: shallowCopyInternalPointer(this.$$), |
| } |
| })); |
| |
| clone.$$.count.value += 1; |
| clone.$$.deleteScheduled = false; |
| return clone; |
| } |
| }, |
| |
| "delete"() { |
| if (!this.$$.ptr) { |
| throwInstanceAlreadyDeleted(this); |
| } |
| |
| if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) { |
| throwBindingError('Object already scheduled for deletion'); |
| } |
| |
| detachFinalizer(this); |
| releaseClassHandle(this.$$); |
| |
| if (!this.$$.preservePointerOnDelete) { |
| this.$$.smartPtr = undefined; |
| this.$$.ptr = undefined; |
| } |
| }, |
| |
| "isDeleted"() { |
| return !this.$$.ptr; |
| }, |
| |
| "deleteLater"() { |
| if (!this.$$.ptr) { |
| throwInstanceAlreadyDeleted(this); |
| } |
| if (this.$$.deleteScheduled && !this.$$.preservePointerOnDelete) { |
| throwBindingError('Object already scheduled for deletion'); |
| } |
| deletionQueue.push(this); |
| if (deletionQueue.length === 1 && delayFunction) { |
| delayFunction(flushPendingDeletes); |
| } |
| this.$$.deleteScheduled = true; |
| return this; |
| }, |
| }); |
| |
| // Support `using ...` from https://github.com/tc39/proposal-explicit-resource-management. |
| const symbolDispose = Symbol.dispose; |
| if (symbolDispose) { |
| proto[symbolDispose] = proto['delete']; |
| } |
| }; |
| /** @constructor */ |
| function ClassHandle() { |
| } |
| |
| var createNamedFunction = (name, func) => Object.defineProperty(func, 'name', { value: name }); |
| |
| var registeredPointers = { |
| }; |
| |
| var ensureOverloadTable = (proto, methodName, humanName) => { |
| if (undefined === proto[methodName].overloadTable) { |
| var prevFunc = proto[methodName]; |
| // Inject an overload resolver function that routes to the appropriate overload based on the number of arguments. |
| proto[methodName] = function(...args) { |
| // TODO This check can be removed in -O3 level "unsafe" optimizations. |
| if (!proto[methodName].overloadTable.hasOwnProperty(args.length)) { |
| throwBindingError(`Function '${humanName}' called with an invalid number of arguments (${args.length}) - expects one of (${proto[methodName].overloadTable})!`); |
| } |
| return proto[methodName].overloadTable[args.length].apply(this, args); |
| }; |
| // Move the previous function into the overload table. |
| proto[methodName].overloadTable = []; |
| proto[methodName].overloadTable[prevFunc.argCount] = prevFunc; |
| } |
| }; |
| |
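| // Public symbols (free functions, class constructors, enums, ...) are attached |
| // directly to Module. Overloads are distinguished only by argument count: the |
| // first registration stores the function itself, and later registrations go |
| // through ensureOverloadTable, which swaps in a dispatcher that picks the |
| // entry matching args.length and throws for any other arity. |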
| /** @param {number=} numArguments */ |
| var exposePublicSymbol = (name, value, numArguments) => { |
| if (Module.hasOwnProperty(name)) { |
| if (undefined === numArguments || (undefined !== Module[name].overloadTable && undefined !== Module[name].overloadTable[numArguments])) { |
| throwBindingError(`Cannot register public name '${name}' twice`); |
| } |
| |
| // We are exposing a function with the same name as an existing function. Create an overload table and a function selector |
| // that routes between the two. |
| ensureOverloadTable(Module, name, name); |
| if (Module[name].overloadTable.hasOwnProperty(numArguments)) { |
| throwBindingError(`Cannot register multiple overloads of a function with the same number of arguments (${numArguments})!`); |
| } |
| // Add the new function into the overload table. |
| Module[name].overloadTable[numArguments] = value; |
| } else { |
| Module[name] = value; |
| Module[name].argCount = numArguments; |
| } |
| }; |
| |
| var char_0 = 48; |
| |
| var char_9 = 57; |
| var makeLegalFunctionName = (name) => { |
| name = name.replace(/[^a-zA-Z0-9_]/g, '$'); |
| var f = name.charCodeAt(0); |
| if (f >= char_0 && f <= char_9) { |
| return `_${name}`; |
| } |
| return name; |
| }; |
| |
| |
| /** @constructor */ |
| function RegisteredClass(name, |
| constructor, |
| instancePrototype, |
| rawDestructor, |
| baseClass, |
| getActualType, |
| upcast, |
| downcast) { |
| this.name = name; |
| this.constructor = constructor; |
| this.instancePrototype = instancePrototype; |
| this.rawDestructor = rawDestructor; |
| this.baseClass = baseClass; |
| this.getActualType = getActualType; |
| this.upcast = upcast; |
| this.downcast = downcast; |
| this.pureVirtualFunctions = []; |
| } |
| |
| |
| var upcastPointer = (ptr, ptrClass, desiredClass) => { |
| while (ptrClass !== desiredClass) { |
| if (!ptrClass.upcast) { |
| throwBindingError(`Expected null or instance of ${desiredClass.name}, got an instance of ${ptrClass.name}`); |
| } |
| ptr = ptrClass.upcast(ptr); |
| ptrClass = ptrClass.baseClass; |
| } |
| return ptr; |
| }; |
| |
| var embindRepr = (v) => { |
| if (v === null) { |
| return 'null'; |
| } |
| var t = typeof v; |
| if (t === 'object' || t === 'array' || t === 'function') { |
| return v.toString(); |
| } else { |
| return '' + v; |
| } |
| }; |
| /** @suppress {globalThis} */ |
| function constNoSmartPtrRawPointerToWireType(destructors, handle) { |
| if (handle === null) { |
| if (this.isReference) { |
| throwBindingError(`null is not a valid ${this.name}`); |
| } |
| return 0; |
| } |
| |
| if (!handle.$$) { |
| throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); |
| } |
| if (!handle.$$.ptr) { |
| throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); |
| } |
| var handleClass = handle.$$.ptrType.registeredClass; |
| var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); |
| return ptr; |
| } |
| |
| |
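| // genericPointerToWireType is used whenever the class has a base class or the |
| // pointer is a smart pointer; the simpler const/non-const raw-pointer |
| // converters cover the remaining case. For smart pointers the sharingPolicy |
| // decides how the C++ side receives ownership: 0 (NONE) only accepts an exact |
| // smart-pointer match, 1 (INTRUSIVE) passes the existing smart pointer |
| // through, and 2 (BY_EMVAL) creates a new shared owner via rawShare, keeping a |
| // clone of the JS handle alive until the C++ side releases it. |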
| /** @suppress {globalThis} */ |
| function genericPointerToWireType(destructors, handle) { |
| var ptr; |
| if (handle === null) { |
| if (this.isReference) { |
| throwBindingError(`null is not a valid ${this.name}`); |
| } |
| |
| if (this.isSmartPointer) { |
| ptr = this.rawConstructor(); |
| if (destructors !== null) { |
| destructors.push(this.rawDestructor, ptr); |
| } |
| return ptr; |
| } else { |
| return 0; |
| } |
| } |
| |
| if (!handle || !handle.$$) { |
| throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); |
| } |
| if (!handle.$$.ptr) { |
| throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); |
| } |
| if (!this.isConst && handle.$$.ptrType.isConst) { |
| throwBindingError(`Cannot convert argument of type ${(handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name)} to parameter type ${this.name}`); |
| } |
| var handleClass = handle.$$.ptrType.registeredClass; |
| ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); |
| |
| if (this.isSmartPointer) { |
| // TODO: this is not strictly true |
| // We could support BY_EMVAL conversions from raw pointers to smart pointers |
| // because the smart pointer can hold a reference to the handle |
| if (undefined === handle.$$.smartPtr) { |
| throwBindingError('Passing raw pointer to smart pointer is illegal'); |
| } |
| |
| switch (this.sharingPolicy) { |
| case 0: // NONE |
| // no upcasting |
| if (handle.$$.smartPtrType === this) { |
| ptr = handle.$$.smartPtr; |
| } else { |
| throwBindingError(`Cannot convert argument of type ${(handle.$$.smartPtrType ? handle.$$.smartPtrType.name : handle.$$.ptrType.name)} to parameter type ${this.name}`); |
| } |
| break; |
| |
| case 1: // INTRUSIVE |
| ptr = handle.$$.smartPtr; |
| break; |
| |
| case 2: // BY_EMVAL |
| if (handle.$$.smartPtrType === this) { |
| ptr = handle.$$.smartPtr; |
| } else { |
| var clonedHandle = handle['clone'](); |
| ptr = this.rawShare( |
| ptr, |
| Emval.toHandle(() => clonedHandle['delete']()) |
| ); |
| if (destructors !== null) { |
| destructors.push(this.rawDestructor, ptr); |
| } |
| } |
| break; |
| |
| default: |
| throwBindingError('Unsupported sharing policy'); |
| } |
| } |
| return ptr; |
| } |
| |
| |
| |
| /** @suppress {globalThis} */ |
| function nonConstNoSmartPtrRawPointerToWireType(destructors, handle) { |
| if (handle === null) { |
| if (this.isReference) { |
| throwBindingError(`null is not a valid ${this.name}`); |
| } |
| return 0; |
| } |
| |
| if (!handle.$$) { |
| throwBindingError(`Cannot pass "${embindRepr(handle)}" as a ${this.name}`); |
| } |
| if (!handle.$$.ptr) { |
| throwBindingError(`Cannot pass deleted object as a pointer of type ${this.name}`); |
| } |
| if (handle.$$.ptrType.isConst) { |
| throwBindingError(`Cannot convert argument of type ${handle.$$.ptrType.name} to parameter type ${this.name}`); |
| } |
| var handleClass = handle.$$.ptrType.registeredClass; |
| var ptr = upcastPointer(handle.$$.ptr, handleClass, this.registeredClass); |
| return ptr; |
| } |
| |
| |
| |
| var downcastPointer = (ptr, ptrClass, desiredClass) => { |
| if (ptrClass === desiredClass) { |
| return ptr; |
| } |
| if (undefined === desiredClass.baseClass) { |
| return null; // no conversion |
| } |
| |
| var rv = downcastPointer(ptr, ptrClass, desiredClass.baseClass); |
| if (rv === null) { |
| return null; |
| } |
| return desiredClass.downcast(rv); |
| }; |
| |
| |
| var registeredInstances = { |
| }; |
| |
| var getBasestPointer = (class_, ptr) => { |
| if (ptr === undefined) { |
| throwBindingError('ptr should not be undefined'); |
| } |
| while (class_.baseClass) { |
| ptr = class_.upcast(ptr); |
| class_ = class_.baseClass; |
| } |
| return ptr; |
| }; |
| var getInheritedInstance = (class_, ptr) => { |
| ptr = getBasestPointer(class_, ptr); |
| return registeredInstances[ptr]; |
| }; |
| |
| |
| var makeClassHandle = (prototype, record) => { |
| if (!record.ptrType || !record.ptr) { |
| throwInternalError('makeClassHandle requires ptr and ptrType'); |
| } |
| var hasSmartPtrType = !!record.smartPtrType; |
| var hasSmartPtr = !!record.smartPtr; |
| if (hasSmartPtrType !== hasSmartPtr) { |
| throwInternalError('Both smartPtrType and smartPtr must be specified'); |
| } |
| record.count = { value: 1 }; |
| return attachFinalizer(Object.create(prototype, { |
| $$: { |
| value: record, |
| writable: true, |
| }, |
| })); |
| }; |
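| // fromWireType for class pointers: if a live JS wrapper already exists for the |
| // underlying C++ object it is cloned (or revived, if its ref count had dropped |
| // to zero), otherwise getActualType() is consulted so the returned handle uses |
| // the most-derived registered class, downcasting the pointer when possible. |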
| /** @suppress {globalThis} */ |
| function RegisteredPointer_fromWireType(ptr) { |
| // ptr is a raw pointer (or a raw smartpointer) |
| |
| // rawPointer is a maybe-null raw pointer |
| var rawPointer = this.getPointee(ptr); |
| if (!rawPointer) { |
| this.destructor(ptr); |
| return null; |
| } |
| |
| var registeredInstance = getInheritedInstance(this.registeredClass, rawPointer); |
| if (undefined !== registeredInstance) { |
| // JS object has been neutered, time to repopulate it |
| if (0 === registeredInstance.$$.count.value) { |
| registeredInstance.$$.ptr = rawPointer; |
| registeredInstance.$$.smartPtr = ptr; |
| return registeredInstance['clone'](); |
| } else { |
| // else, just increment reference count on existing object |
| // it already has a reference to the smart pointer |
| var rv = registeredInstance['clone'](); |
| this.destructor(ptr); |
| return rv; |
| } |
| } |
| |
| function makeDefaultHandle() { |
| if (this.isSmartPointer) { |
| return makeClassHandle(this.registeredClass.instancePrototype, { |
| ptrType: this.pointeeType, |
| ptr: rawPointer, |
| smartPtrType: this, |
| smartPtr: ptr, |
| }); |
| } else { |
| return makeClassHandle(this.registeredClass.instancePrototype, { |
| ptrType: this, |
| ptr, |
| }); |
| } |
| } |
| |
| var actualType = this.registeredClass.getActualType(rawPointer); |
| var registeredPointerRecord = registeredPointers[actualType]; |
| if (!registeredPointerRecord) { |
| return makeDefaultHandle.call(this); |
| } |
| |
| var toType; |
| if (this.isConst) { |
| toType = registeredPointerRecord.constPointerType; |
| } else { |
| toType = registeredPointerRecord.pointerType; |
| } |
| var dp = downcastPointer( |
| rawPointer, |
| this.registeredClass, |
| toType.registeredClass); |
| if (dp === null) { |
| return makeDefaultHandle.call(this); |
| } |
| if (this.isSmartPointer) { |
| return makeClassHandle(toType.registeredClass.instancePrototype, { |
| ptrType: toType, |
| ptr: dp, |
| smartPtrType: this, |
| smartPtr: ptr, |
| }); |
| } else { |
| return makeClassHandle(toType.registeredClass.instancePrototype, { |
| ptrType: toType, |
| ptr: dp, |
| }); |
| } |
| } |
| |
| var init_RegisteredPointer = () => { |
| Object.assign(RegisteredPointer.prototype, { |
| getPointee(ptr) { |
| if (this.rawGetPointee) { |
| ptr = this.rawGetPointee(ptr); |
| } |
| return ptr; |
| }, |
| destructor(ptr) { |
| this.rawDestructor?.(ptr); |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': readPointer, |
| 'fromWireType': RegisteredPointer_fromWireType, |
| }); |
| }; |
| /** @constructor |
| @param {*=} pointeeType, |
| @param {*=} sharingPolicy, |
| @param {*=} rawGetPointee, |
| @param {*=} rawConstructor, |
| @param {*=} rawShare, |
| @param {*=} rawDestructor, |
| */ |
| function RegisteredPointer( |
| name, |
| registeredClass, |
| isReference, |
| isConst, |
| |
| // smart pointer properties |
| isSmartPointer, |
| pointeeType, |
| sharingPolicy, |
| rawGetPointee, |
| rawConstructor, |
| rawShare, |
| rawDestructor |
| ) { |
| this.name = name; |
| this.registeredClass = registeredClass; |
| this.isReference = isReference; |
| this.isConst = isConst; |
| |
| // smart pointer properties |
| this.isSmartPointer = isSmartPointer; |
| this.pointeeType = pointeeType; |
| this.sharingPolicy = sharingPolicy; |
| this.rawGetPointee = rawGetPointee; |
| this.rawConstructor = rawConstructor; |
| this.rawShare = rawShare; |
| this.rawDestructor = rawDestructor; |
| |
| if (!isSmartPointer && registeredClass.baseClass === undefined) { |
| if (isConst) { |
| this['toWireType'] = constNoSmartPtrRawPointerToWireType; |
| this.destructorFunction = null; |
| } else { |
| this['toWireType'] = nonConstNoSmartPtrRawPointerToWireType; |
| this.destructorFunction = null; |
| } |
| } else { |
| this['toWireType'] = genericPointerToWireType; |
| // Here we must leave this.destructorFunction undefined, since whether genericPointerToWireType returns |
| // a pointer that needs to be freed up is runtime-dependent, and cannot be evaluated at registration time. |
| // TODO: Create an alternative mechanism that allows removing the use of var destructors = []; array in |
| // craftInvokerFunction altogether. |
| } |
| } |
| |
| /** @param {number=} numArguments */ |
| var replacePublicSymbol = (name, value, numArguments) => { |
| if (!Module.hasOwnProperty(name)) { |
| throwInternalError('Replacing nonexistent public symbol'); |
| } |
| // If there's an overload table for this symbol, replace the symbol in the overload table instead. |
| if (undefined !== Module[name].overloadTable && undefined !== numArguments) { |
| Module[name].overloadTable[numArguments] = value; |
| } else { |
| Module[name] = value; |
| Module[name].argCount = numArguments; |
| } |
| }; |
| |
| |
| |
| var wasmTableMirror = []; |
| |
| /** @type {WebAssembly.Table} */ |
| var wasmTable; |
| var getWasmTableEntry = (funcPtr) => { |
| var func = wasmTableMirror[funcPtr]; |
| if (!func) { |
| /** @suppress {checkTypes} */ |
| wasmTableMirror[funcPtr] = func = wasmTable.get(funcPtr); |
| } |
| return func; |
| }; |
| var embind__requireFunction = (signature, rawFunction, isAsync = false) => { |
| |
| signature = AsciiToString(signature); |
| |
| function makeDynCaller() { |
| var rtn = getWasmTableEntry(rawFunction); |
| return rtn; |
| } |
| |
| var fp = makeDynCaller(); |
| if (typeof fp != 'function') { |
| throwBindingError(`unknown function pointer with signature ${signature}: ${rawFunction}`); |
| } |
| return fp; |
| }; |
| |
| |
| |
| class UnboundTypeError extends Error {} |
| |
| |
| |
| var getTypeName = (type) => { |
| var ptr = ___getTypeName(type); |
| var rv = AsciiToString(ptr); |
| _free(ptr); |
| return rv; |
| }; |
| var throwUnboundTypeError = (message, types) => { |
| var unboundTypes = []; |
| var seen = {}; |
| function visit(type) { |
| if (seen[type]) { |
| return; |
| } |
| if (registeredTypes[type]) { |
| return; |
| } |
| if (typeDependencies[type]) { |
| typeDependencies[type].forEach(visit); |
| return; |
| } |
| unboundTypes.push(type); |
| seen[type] = true; |
| } |
| types.forEach(visit); |
| |
| throw new UnboundTypeError(`${message}: ` + unboundTypes.map(getTypeName).join(', ')); |
| }; |
| |
| var __embind_register_class = (rawType, |
| rawPointerType, |
| rawConstPointerType, |
| baseClassRawType, |
| getActualTypeSignature, |
| getActualType, |
| upcastSignature, |
| upcast, |
| downcastSignature, |
| downcast, |
| name, |
| destructorSignature, |
| rawDestructor) => { |
| name = AsciiToString(name); |
| getActualType = embind__requireFunction(getActualTypeSignature, getActualType); |
| upcast &&= embind__requireFunction(upcastSignature, upcast); |
| downcast &&= embind__requireFunction(downcastSignature, downcast); |
| rawDestructor = embind__requireFunction(destructorSignature, rawDestructor); |
| var legalFunctionName = makeLegalFunctionName(name); |
| |
| exposePublicSymbol(legalFunctionName, function() { |
| // this code cannot run if baseClassRawType is zero |
| throwUnboundTypeError(`Cannot construct ${name} due to unbound types`, [baseClassRawType]); |
| }); |
| |
| whenDependentTypesAreResolved( |
| [rawType, rawPointerType, rawConstPointerType], |
| baseClassRawType ? [baseClassRawType] : [], |
| (base) => { |
| base = base[0]; |
| |
| var baseClass; |
| var basePrototype; |
| if (baseClassRawType) { |
| baseClass = base.registeredClass; |
| basePrototype = baseClass.instancePrototype; |
| } else { |
| basePrototype = ClassHandle.prototype; |
| } |
| |
| var constructor = createNamedFunction(name, function(...args) { |
| if (Object.getPrototypeOf(this) !== instancePrototype) { |
| throw new BindingError(`Use 'new' to construct ${name}`); |
| } |
| if (undefined === registeredClass.constructor_body) { |
| throw new BindingError(`${name} has no accessible constructor`); |
| } |
| var body = registeredClass.constructor_body[args.length]; |
| if (undefined === body) { |
| throw new BindingError(`Tried to invoke ctor of ${name} with invalid number of parameters (${args.length}) - expected (${Object.keys(registeredClass.constructor_body).toString()}) parameters instead!`); |
| } |
| return body.apply(this, args); |
| }); |
| |
| var instancePrototype = Object.create(basePrototype, { |
| constructor: { value: constructor }, |
| }); |
| |
| constructor.prototype = instancePrototype; |
| |
| var registeredClass = new RegisteredClass(name, |
| constructor, |
| instancePrototype, |
| rawDestructor, |
| baseClass, |
| getActualType, |
| upcast, |
| downcast); |
| |
| if (registeredClass.baseClass) { |
| // Keep track of class hierarchy. Used to allow sub-classes to inherit class functions. |
| registeredClass.baseClass.__derivedClasses ??= []; |
| |
| registeredClass.baseClass.__derivedClasses.push(registeredClass); |
| } |
| |
| var referenceConverter = new RegisteredPointer(name, |
| registeredClass, |
| true, |
| false, |
| false); |
| |
| var pointerConverter = new RegisteredPointer(name + '*', |
| registeredClass, |
| false, |
| false, |
| false); |
| |
| var constPointerConverter = new RegisteredPointer(name + ' const*', |
| registeredClass, |
| false, |
| true, |
| false); |
| |
| registeredPointers[rawType] = { |
| pointerType: pointerConverter, |
| constPointerType: constPointerConverter |
| }; |
| |
| replacePublicSymbol(legalFunctionName, constructor); |
| |
| return [referenceConverter, pointerConverter, constPointerConverter]; |
| } |
| ); |
| }; |
| |
| var heap32VectorToArray = (count, firstElement) => { |
| var array = []; |
| for (var i = 0; i < count; i++) { |
| // TODO(https://github.com/emscripten-core/emscripten/issues/17310): |
| // Find a way to hoist the `>> 2` or `>> 3` out of this loop. |
| array.push(HEAPU32[(((firstElement)+(i * 4))>>2)]); |
| } |
| return array; |
| }; |
| |
| |
| |
| |
| |
| |
| function usesDestructorStack(argTypes) { |
| // Skip return value at index 0 - it's not deleted here. |
| for (var i = 1; i < argTypes.length; ++i) { |
| // The type does not define a destructor function - must use dynamic stack |
| if (argTypes[i] !== null && argTypes[i].destructorFunction === undefined) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
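| // createJsInvoker builds the *source text* of the JS trampoline for one bound |
| // function: it converts every argument with toWireType, calls the wasm-side |
| // invoker, runs the argument destructors (a dynamic stack when any type lacks |
| // a known destructorFunction, otherwise direct per-argument calls), and maps |
| // the result back with fromWireType. craftInvokerFunction() below compiles it |
| // with `new Function` and binds the captured values. Roughly, for a void |
| // method taking one argument whose type has a known destructor, the generated |
| // body is: |
| //   return function (arg0) { |
| //     var thisWired = classParam['toWireType'](null, this); |
| //     var arg0Wired = argType0['toWireType'](null, arg0); |
| //     invoker(fn, thisWired, arg0Wired); |
| //     arg0Wired_dtor(arg0Wired); |
| //   } |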
| function createJsInvoker(argTypes, isClassMethodFunc, returns, isAsync) { |
| var needsDestructorStack = usesDestructorStack(argTypes); |
| var argCount = argTypes.length - 2; |
| var argsList = []; |
| var argsListWired = ['fn']; |
| if (isClassMethodFunc) { |
| argsListWired.push('thisWired'); |
| } |
| for (var i = 0; i < argCount; ++i) { |
| argsList.push(`arg${i}`); |
| argsListWired.push(`arg${i}Wired`); |
| } |
| argsList = argsList.join(','); |
| argsListWired = argsListWired.join(','); |
| |
| var invokerFnBody = `return function (${argsList}) {\n`; |
| |
| if (needsDestructorStack) { |
| invokerFnBody += "var destructors = [];\n"; |
| } |
| |
| var dtorStack = needsDestructorStack ? "destructors" : "null"; |
| var args1 = ["humanName", "throwBindingError", "invoker", "fn", "runDestructors", "retType", "classParam"]; |
| |
| if (isClassMethodFunc) { |
| invokerFnBody += `var thisWired = classParam['toWireType'](${dtorStack}, this);\n`; |
| } |
| |
| for (var i = 0; i < argCount; ++i) { |
| invokerFnBody += `var arg${i}Wired = argType${i}['toWireType'](${dtorStack}, arg${i});\n`; |
| args1.push(`argType${i}`); |
| } |
| |
| invokerFnBody += (returns || isAsync ? "var rv = ":"") + `invoker(${argsListWired});\n`; |
| |
| var returnVal = returns ? "rv" : ""; |
| |
| if (needsDestructorStack) { |
| invokerFnBody += "runDestructors(destructors);\n"; |
| } else { |
| for (var i = isClassMethodFunc?1:2; i < argTypes.length; ++i) { // Skip return value at index 0 - it's not deleted here. Also skip class type if not a method. |
| var paramName = (i === 1 ? "thisWired" : ("arg"+(i - 2)+"Wired")); |
| if (argTypes[i].destructorFunction !== null) { |
| invokerFnBody += `${paramName}_dtor(${paramName});\n`; |
| args1.push(`${paramName}_dtor`); |
| } |
| } |
| } |
| |
| if (returns) { |
| invokerFnBody += "var ret = retType['fromWireType'](rv);\n" + |
| "return ret;\n"; |
| } else { |
| } |
| |
| invokerFnBody += "}\n"; |
| |
| return [args1, invokerFnBody]; |
| } |
| function craftInvokerFunction(humanName, argTypes, classType, cppInvokerFunc, cppTargetFunc, /** boolean= */ isAsync) { |
| // humanName: a human-readable string name for the function to be generated. |
| // argTypes: An array that contains the embind type objects for all types in the function signature. |
| // argTypes[0] is the type object for the function return value. |
| // argTypes[1] is the type object for function this object/class type, or null if not crafting an invoker for a class method. |
| // argTypes[2...] are the actual function parameters. |
| // classType: The embind type object for the class to be bound, or null if this is not a method of a class. |
| // cppInvokerFunc: JS Function object to the C++-side function that interops into C++ code. |
| // cppTargetFunc: Function pointer (an integer to FUNCTION_TABLE) to the target C++ function the cppInvokerFunc will end up calling. |
| // isAsync: Optional. If true, returns an async function. Async bindings are only supported with JSPI. |
| var argCount = argTypes.length; |
| |
| if (argCount < 2) { |
| throwBindingError("argTypes array size mismatch! Must at least get return value and 'this' types!"); |
| } |
| |
| var isClassMethodFunc = (argTypes[1] !== null && classType !== null); |
| |
| // Free functions with signature "void function()" do not need an invoker that marshalls between wire types. |
| // TODO: This omits argument count check - enable only at -O3 or similar. |
| // if (ENABLE_UNSAFE_OPTS && argCount == 2 && argTypes[0].name == "void" && !isClassMethodFunc) { |
| // return FUNCTION_TABLE[fn]; |
| // } |
| |
| // Determine if we need to use a dynamic stack to store the destructors for the function parameters. |
| // TODO: Remove this completely once all function invokers are being dynamically generated. |
| var needsDestructorStack = usesDestructorStack(argTypes); |
| |
| var returns = (argTypes[0].name !== 'void'); |
| |
| var expectedArgCount = argCount - 2; |
| // Build the arguments that will be passed into the closure around the invoker |
| // function. |
| var closureArgs = [humanName, throwBindingError, cppInvokerFunc, cppTargetFunc, runDestructors, argTypes[0], argTypes[1]]; |
| for (var i = 0; i < argCount - 2; ++i) { |
| closureArgs.push(argTypes[i+2]); |
| } |
| if (!needsDestructorStack) { |
| // Skip return value at index 0 - it's not deleted here. Also skip class type if not a method. |
| for (var i = isClassMethodFunc?1:2; i < argTypes.length; ++i) { |
| if (argTypes[i].destructorFunction !== null) { |
| closureArgs.push(argTypes[i].destructorFunction); |
| } |
| } |
| } |
| |
| let [args, invokerFnBody] = createJsInvoker(argTypes, isClassMethodFunc, returns, isAsync); |
| var invokerFn = new Function(...args, invokerFnBody)(...closureArgs); |
| return createNamedFunction(humanName, invokerFn); |
| } |
| var __embind_register_class_constructor = ( |
| rawClassType, |
| argCount, |
| rawArgTypesAddr, |
| invokerSignature, |
| invoker, |
| rawConstructor |
| ) => { |
| var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr); |
| invoker = embind__requireFunction(invokerSignature, invoker); |
| var args = [rawConstructor]; |
| var destructors = []; |
| |
| whenDependentTypesAreResolved([], [rawClassType], (classType) => { |
| classType = classType[0]; |
| var humanName = `constructor ${classType.name}`; |
| |
| if (undefined === classType.registeredClass.constructor_body) { |
| classType.registeredClass.constructor_body = []; |
| } |
| if (undefined !== classType.registeredClass.constructor_body[argCount - 1]) { |
| throw new BindingError(`Cannot register multiple constructors with identical number of parameters (${argCount-1}) for class '${classType.name}'! Overload resolution is currently only performed using the parameter count, not actual type info!`); |
| } |
| classType.registeredClass.constructor_body[argCount - 1] = () => { |
| throwUnboundTypeError(`Cannot construct ${classType.name} due to unbound types`, rawArgTypes); |
| }; |
| |
| whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => { |
| // Insert empty slot for context type (argTypes[1]). |
| argTypes.splice(1, 0, null); |
| classType.registeredClass.constructor_body[argCount - 1] = craftInvokerFunction(humanName, argTypes, null, invoker, rawConstructor); |
| return []; |
| }); |
| return []; |
| }); |
| }; |
| |
| |
| |
| |
| |
| |
| |
| var getFunctionName = (signature) => { |
| signature = signature.trim(); |
| const argsIndex = signature.indexOf("("); |
| if (argsIndex === -1) return signature; |
| return signature.slice(0, argsIndex); |
| }; |
| var __embind_register_class_function = (rawClassType, |
| methodName, |
| argCount, |
| rawArgTypesAddr, // [ReturnType, ThisType, Args...] |
| invokerSignature, |
| rawInvoker, |
| context, |
| isPureVirtual, |
| isAsync, |
| isNonnullReturn) => { |
| var rawArgTypes = heap32VectorToArray(argCount, rawArgTypesAddr); |
| methodName = AsciiToString(methodName); |
| methodName = getFunctionName(methodName); |
| rawInvoker = embind__requireFunction(invokerSignature, rawInvoker, isAsync); |
| |
| whenDependentTypesAreResolved([], [rawClassType], (classType) => { |
| classType = classType[0]; |
| var humanName = `${classType.name}.${methodName}`; |
| |
| if (methodName.startsWith("@@")) { |
| methodName = Symbol[methodName.substring(2)]; |
| } |
| |
| if (isPureVirtual) { |
| classType.registeredClass.pureVirtualFunctions.push(methodName); |
| } |
| |
| function unboundTypesHandler() { |
| throwUnboundTypeError(`Cannot call ${humanName} due to unbound types`, rawArgTypes); |
| } |
| |
| var proto = classType.registeredClass.instancePrototype; |
| var method = proto[methodName]; |
| if (undefined === method || (undefined === method.overloadTable && method.className !== classType.name && method.argCount === argCount - 2)) { |
| // This is the first overload to be registered, OR we are replacing a |
| // function in the base class with a function in the derived class. |
| unboundTypesHandler.argCount = argCount - 2; |
| unboundTypesHandler.className = classType.name; |
| proto[methodName] = unboundTypesHandler; |
| } else { |
| // There was an existing function with the same name registered. Set up |
| // a function overload routing table. |
| ensureOverloadTable(proto, methodName, humanName); |
| proto[methodName].overloadTable[argCount - 2] = unboundTypesHandler; |
| } |
| |
| whenDependentTypesAreResolved([], rawArgTypes, (argTypes) => { |
| var memberFunction = craftInvokerFunction(humanName, argTypes, classType, rawInvoker, context, isAsync); |
| |
| // Replace the initial unbound-handler-stub function with the |
| // appropriate member function, now that all types are resolved. If |
| // multiple overloads are registered for this function, the function |
| // goes into an overload table. |
| if (undefined === proto[methodName].overloadTable) { |
| // Set argCount in case an overload is registered later |
| memberFunction.argCount = argCount - 2; |
| proto[methodName] = memberFunction; |
| } else { |
| proto[methodName].overloadTable[argCount - 2] = memberFunction; |
| } |
| |
| return []; |
| }); |
| return []; |
| }); |
| }; |
| |
| |
| var __embind_register_constant = (name, type, value) => { |
| name = AsciiToString(name); |
| whenDependentTypesAreResolved([], [type], (type) => { |
| type = type[0]; |
| Module[name] = type['fromWireType'](value); |
| return []; |
| }); |
| }; |
| |
| |
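| // emval handles index this table in pairs: emval_handles[h] is the JS value |
| // and emval_handles[h + 1] its reference count. Handles 0-9 are reserved for |
| // the constants undefined (2), null (4), true (6) and false (8), which are |
| // never freed; everything else is allocated from emval_freelist (or appended) |
| // and recycled once __emval_decref drops its count to zero. |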
| var emval_freelist = []; |
| |
| var emval_handles = [0,1,,1,null,1,true,1,false,1]; |
| var __emval_decref = (handle) => { |
| if (handle > 9 && 0 === --emval_handles[handle + 1]) { |
| emval_handles[handle] = undefined; |
| emval_freelist.push(handle); |
| } |
| }; |
| |
| |
| |
| var Emval = { |
| toValue:(handle) => { |
| if (!handle) { |
| throwBindingError(`Cannot use deleted val. handle = ${handle}`); |
| } |
| return emval_handles[handle]; |
| }, |
| toHandle:(value) => { |
| switch (value) { |
| case undefined: return 2; |
| case null: return 4; |
| case true: return 6; |
| case false: return 8; |
| default:{ |
| const handle = emval_freelist.pop() || emval_handles.length; |
| emval_handles[handle] = value; |
| emval_handles[handle + 1] = 1; |
| return handle; |
| } |
| } |
| }, |
| }; |
| |
| |
| var EmValType = { |
| name: 'emscripten::val', |
| 'fromWireType': (handle) => { |
| var rv = Emval.toValue(handle); |
| __emval_decref(handle); |
| return rv; |
| }, |
| 'toWireType': (destructors, value) => Emval.toHandle(value), |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': readPointer, |
| destructorFunction: null, // This type does not need a destructor |
| |
| // TODO: do we need a deleteObject here? write a test where |
| // emval is passed into JS via an interface |
| }; |
| var __embind_register_emval = (rawType) => registerType(rawType, EmValType); |
| |
| |
| var enumReadValueFromPointer = (name, width, signed) => { |
| switch (width) { |
| case 1: return signed ? |
| function(pointer) { return this['fromWireType'](HEAP8[pointer]) } : |
| function(pointer) { return this['fromWireType'](HEAPU8[pointer]) }; |
| case 2: return signed ? |
| function(pointer) { return this['fromWireType'](HEAP16[((pointer)>>1)]) } : |
| function(pointer) { return this['fromWireType'](HEAPU16[((pointer)>>1)]) }; |
| case 4: return signed ? |
| function(pointer) { return this['fromWireType'](HEAP32[((pointer)>>2)]) } : |
| function(pointer) { return this['fromWireType'](HEAPU32[((pointer)>>2)]) }; |
| default: |
| throw new TypeError(`invalid integer width (${width}): ${name}`); |
| } |
| }; |
| |
| |
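| // Enums are exposed as a constructor-like object on Module whose `values` map |
| // translates wire integers back to singleton JS objects carrying `.value`. |
| // Illustrative sketch (the names are hypothetical, not part of this module): |
| //   Module.Mode.Fast.value         // the C++ enumerator's integer value |
| //   someBoundFn(Module.Mode.Fast)  // toWireType passes that integer through |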
| /** @suppress {globalThis} */ |
| var __embind_register_enum = (rawType, name, size, isSigned) => { |
| name = AsciiToString(name); |
| |
| function ctor() {} |
| ctor.values = {}; |
| |
| registerType(rawType, { |
| name, |
| constructor: ctor, |
| 'fromWireType': function(c) { |
| return this.constructor.values[c]; |
| }, |
| 'toWireType': (destructors, c) => c.value, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': enumReadValueFromPointer(name, size, isSigned), |
| destructorFunction: null, |
| }); |
| exposePublicSymbol(name, ctor); |
| }; |
| |
| |
| |
| |
| |
| var requireRegisteredType = (rawType, humanName) => { |
| var impl = registeredTypes[rawType]; |
| if (undefined === impl) { |
| throwBindingError(`${humanName} has unknown type ${getTypeName(rawType)}`); |
| } |
| return impl; |
| }; |
| var __embind_register_enum_value = (rawEnumType, name, enumValue) => { |
| var enumType = requireRegisteredType(rawEnumType, 'enum'); |
| name = AsciiToString(name); |
| |
| var Enum = enumType.constructor; |
| |
| var Value = Object.create(enumType.constructor.prototype, { |
| value: {value: enumValue}, |
| constructor: {value: createNamedFunction(`${enumType.name}_${name}`, function() {})}, |
| }); |
| Enum.values[enumValue] = Value; |
| Enum[name] = Value; |
| }; |
| |
| var floatReadValueFromPointer = (name, width) => { |
| switch (width) { |
| case 4: return function(pointer) { |
| return this['fromWireType'](HEAPF32[((pointer)>>2)]); |
| }; |
| case 8: return function(pointer) { |
| return this['fromWireType'](HEAPF64[((pointer)>>3)]); |
| }; |
| default: |
| throw new TypeError(`invalid float width (${width}): ${name}`); |
| } |
| }; |
| |
| |
| var __embind_register_float = (rawType, name, size) => { |
| name = AsciiToString(name); |
| registerType(rawType, { |
| name, |
| 'fromWireType': (value) => value, |
| 'toWireType': (destructors, value) => { |
| // The VM will perform JS to Wasm value conversion, according to the spec: |
| // https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue |
| return value; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': floatReadValueFromPointer(name, size), |
| destructorFunction: null, // This type does not need a destructor |
| }); |
| }; |
| |
| |
| |
| |
| |
| |
| |
| |
| |
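| // Free functions are registered in two phases: a stub that throws |
| // UnboundTypeError is exposed immediately (so early calls produce a clear |
| // error), and once every dependent type has been registered the stub is |
| // replaced with a real invoker built by craftInvokerFunction. |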
| var __embind_register_function = (name, argCount, rawArgTypesAddr, signature, rawInvoker, fn, isAsync, isNonnullReturn) => { |
| var argTypes = heap32VectorToArray(argCount, rawArgTypesAddr); |
| name = AsciiToString(name); |
| name = getFunctionName(name); |
| |
| rawInvoker = embind__requireFunction(signature, rawInvoker, isAsync); |
| |
| exposePublicSymbol(name, function() { |
| throwUnboundTypeError(`Cannot call ${name} due to unbound types`, argTypes); |
| }, argCount - 1); |
| |
| whenDependentTypesAreResolved([], argTypes, (argTypes) => { |
| var invokerArgsArray = [argTypes[0] /* return value */, null /* no class 'this'*/].concat(argTypes.slice(1) /* actual params */); |
| replacePublicSymbol(name, craftInvokerFunction(name, invokerArgsArray, null /* no class 'this'*/, rawInvoker, fn, isAsync), argCount - 1); |
| return []; |
| }); |
| }; |
| |
| |
| |
| /** @suppress {globalThis} */ |
| var __embind_register_integer = (primitiveType, name, size, minRange, maxRange) => { |
| name = AsciiToString(name); |
| |
| const isUnsignedType = minRange === 0; |
| |
| let fromWireType = (value) => value; |
| if (isUnsignedType) { |
| var bitshift = 32 - 8*size; |
| fromWireType = (value) => (value << bitshift) >>> bitshift; |
| maxRange = fromWireType(maxRange); |
| } |
| |
| registerType(primitiveType, { |
| name, |
| 'fromWireType': fromWireType, |
| 'toWireType': (destructors, value) => { |
| // The VM will perform JS to Wasm value conversion, according to the spec: |
| // https://www.w3.org/TR/wasm-js-api-1/#towebassemblyvalue |
| return value; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': integerReadValueFromPointer(name, size, minRange !== 0), |
| destructorFunction: null, // This type does not need a destructor |
| }); |
| }; |
| |
| |
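| // memory_view wire format: a pointer to { size, dataPtr } (two 32-bit words). |
| // fromWireType returns a typed-array view directly over wasm memory rather |
| // than a copy, so it aliases the C++ buffer: it becomes stale if that buffer |
| // is freed, and is detached if the wasm heap grows. |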
| var __embind_register_memory_view = (rawType, dataTypeIndex, name) => { |
| var typeMapping = [ |
| Int8Array, |
| Uint8Array, |
| Int16Array, |
| Uint16Array, |
| Int32Array, |
| Uint32Array, |
| Float32Array, |
| Float64Array, |
| BigInt64Array, |
| BigUint64Array, |
| ]; |
| |
| var TA = typeMapping[dataTypeIndex]; |
| |
| function decodeMemoryView(handle) { |
| var size = HEAPU32[((handle)>>2)]; |
| var data = HEAPU32[(((handle)+(4))>>2)]; |
| return new TA(HEAP8.buffer, data, size); |
| } |
| |
| name = AsciiToString(name); |
| registerType(rawType, { |
| name, |
| 'fromWireType': decodeMemoryView, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': decodeMemoryView, |
| }, { |
| ignoreDuplicateRegistrations: true, |
| }); |
| }; |
| |
| |
| |
| |
| |
| var stringToUTF8 = (str, outPtr, maxBytesToWrite) => { |
| return stringToUTF8Array(str, HEAPU8, outPtr, maxBytesToWrite); |
| }; |
| |
| |
| |
| |
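| // std::string crosses the wire as a malloc'd block: a 32-bit length followed |
| // by the bytes. fromWireType decodes it (preserving embedded NULs) and frees |
| // the block; toWireType accepts a JS string, an ArrayBuffer, or a single-byte |
| // typed array, copies it out (UTF-8 for strings, raw bytes for array input), |
| // and registers _free on the destructor stack. |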
| var __embind_register_std_string = (rawType, name) => { |
| name = AsciiToString(name); |
| var stdStringIsUTF8 = true; |
| |
| registerType(rawType, { |
| name, |
| // For some method names we use string keys here since they are part of |
| // the public/external API and/or used by the runtime-generated code. |
| 'fromWireType'(value) { |
| var length = HEAPU32[((value)>>2)]; |
| var payload = value + 4; |
| |
| var str; |
| if (stdStringIsUTF8) { |
| var decodeStartPtr = payload; |
| // Looping here to support possible embedded '0' bytes |
| for (var i = 0; i <= length; ++i) { |
| var currentBytePtr = payload + i; |
| if (i == length || HEAPU8[currentBytePtr] == 0) { |
| var maxRead = currentBytePtr - decodeStartPtr; |
| var stringSegment = UTF8ToString(decodeStartPtr, maxRead); |
| if (str === undefined) { |
| str = stringSegment; |
| } else { |
| str += String.fromCharCode(0); |
| str += stringSegment; |
| } |
| decodeStartPtr = currentBytePtr + 1; |
| } |
| } |
| } else { |
| var a = new Array(length); |
| for (var i = 0; i < length; ++i) { |
| a[i] = String.fromCharCode(HEAPU8[payload + i]); |
| } |
| str = a.join(''); |
| } |
| |
| _free(value); |
| |
| return str; |
| }, |
| 'toWireType'(destructors, value) { |
| if (value instanceof ArrayBuffer) { |
| value = new Uint8Array(value); |
| } |
| |
| var length; |
| var valueIsOfTypeString = (typeof value == 'string'); |
| |
| // We accept `string` or array views with single byte elements |
| if (!(valueIsOfTypeString || (ArrayBuffer.isView(value) && value.BYTES_PER_ELEMENT == 1))) { |
| throwBindingError('Cannot pass non-string to std::string'); |
| } |
| if (stdStringIsUTF8 && valueIsOfTypeString) { |
| length = lengthBytesUTF8(value); |
| } else { |
| length = value.length; |
| } |
| |
| // assumes POINTER_SIZE alignment |
| var base = _malloc(4 + length + 1); |
| var ptr = base + 4; |
| HEAPU32[((base)>>2)] = length; |
| if (valueIsOfTypeString) { |
| if (stdStringIsUTF8) { |
| stringToUTF8(value, ptr, length + 1); |
| } else { |
| for (var i = 0; i < length; ++i) { |
| var charCode = value.charCodeAt(i); |
| if (charCode > 255) { |
| _free(base); |
| throwBindingError('String has UTF-16 code units that do not fit in 8 bits'); |
| } |
| HEAPU8[ptr + i] = charCode; |
| } |
| } |
| } else { |
| HEAPU8.set(value, ptr); |
| } |
| |
| if (destructors !== null) { |
| destructors.push(_free, base); |
| } |
| return base; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': readPointer, |
| destructorFunction(ptr) { |
| _free(ptr); |
| }, |
| }); |
| }; |
| |
| |
| |
| |
| var UTF16Decoder = typeof TextDecoder != 'undefined' ? new TextDecoder('utf-16le') : undefined; |
| var UTF16ToString = (ptr, maxBytesToRead) => { |
| var idx = ((ptr)>>1); |
| var maxIdx = idx + maxBytesToRead / 2; |
| // TextDecoder needs to know the byte length in advance, it doesn't stop on |
| // null terminator by itself. |
| // Also, use the length info to avoid running tiny strings through |
| // TextDecoder, since .subarray() allocates garbage. |
| var endIdx = idx; |
| // If maxBytesToRead is not passed explicitly, it will be undefined, and this |
| // will always evaluate to true. This saves on code size. |
| while (!(endIdx >= maxIdx) && HEAPU16[endIdx]) ++endIdx; |
| |
| if (endIdx - idx > 16 && UTF16Decoder) |
| return UTF16Decoder.decode(HEAPU16.subarray(idx, endIdx)); |
| |
| // Fallback: decode without UTF16Decoder |
| var str = ''; |
| |
| // If maxBytesToRead is not passed explicitly, it will be undefined, and the |
| // for-loop's condition will always evaluate to true. The loop is then |
| // terminated on the first null char. |
| for (var i = idx; !(i >= maxIdx); ++i) { |
| var codeUnit = HEAPU16[i]; |
| if (codeUnit == 0) break; |
| // fromCharCode constructs a character from a UTF-16 code unit, so we can |
| // pass the UTF16 string right through. |
| str += String.fromCharCode(codeUnit); |
| } |
| |
| return str; |
| }; |
| |
| var stringToUTF16 = (str, outPtr, maxBytesToWrite) => { |
| // Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed. |
| maxBytesToWrite ??= 0x7FFFFFFF; |
| if (maxBytesToWrite < 2) return 0; |
| maxBytesToWrite -= 2; // Null terminator. |
| var startPtr = outPtr; |
| var numCharsToWrite = (maxBytesToWrite < str.length*2) ? (maxBytesToWrite / 2) : str.length; |
| for (var i = 0; i < numCharsToWrite; ++i) { |
| // charCodeAt returns a UTF-16 encoded code unit, so it can be directly written to the HEAP. |
| var codeUnit = str.charCodeAt(i); // possibly a lead surrogate |
| HEAP16[((outPtr)>>1)] = codeUnit; |
| outPtr += 2; |
| } |
| // Null-terminate the pointer to the HEAP. |
| HEAP16[((outPtr)>>1)] = 0; |
| return outPtr - startPtr; |
| }; |
| |
| var lengthBytesUTF16 = (str) => str.length*2; |
| |
| var UTF32ToString = (ptr, maxBytesToRead) => { |
| var str = ''; |
| // If maxBytesToRead is not passed explicitly, it will be undefined, and this |
| // will always evaluate to true. This saves on code size. |
| for (var i = 0; !(i >= maxBytesToRead / 4); i++) { |
| var utf32 = HEAP32[(((ptr)+(i*4))>>2)]; |
| if (!utf32) break; |
| str += String.fromCodePoint(utf32); |
| } |
| return str; |
| }; |
| |
| var stringToUTF32 = (str, outPtr, maxBytesToWrite) => { |
| // Backwards compatibility: if max bytes is not specified, assume unsafe unbounded write is allowed. |
| maxBytesToWrite ??= 0x7FFFFFFF; |
| if (maxBytesToWrite < 4) return 0; |
| var startPtr = outPtr; |
| var endPtr = startPtr + maxBytesToWrite - 4; |
| for (var i = 0; i < str.length; ++i) { |
| var codePoint = str.codePointAt(i); |
| // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. |
| // We need to manually skip over the second code unit for correct iteration. |
| if (codePoint > 0xFFFF) { |
| i++; |
| } |
| HEAP32[((outPtr)>>2)] = codePoint; |
| outPtr += 4; |
| if (outPtr + 4 > endPtr) break; |
| } |
| // Null-terminate the pointer to the HEAP. |
| HEAP32[((outPtr)>>2)] = 0; |
| return outPtr - startPtr; |
| }; |
| |
| var lengthBytesUTF32 = (str) => { |
| var len = 0; |
| for (var i = 0; i < str.length; ++i) { |
| var codePoint = str.codePointAt(i); |
| // Gotcha: if codePoint is over 0xFFFF, it is represented as a surrogate pair in UTF-16. |
| // We need to manually skip over the second code unit for correct iteration. |
| if (codePoint > 0xFFFF) { |
| i++; |
| } |
| len += 4; |
| } |
| |
| return len; |
| }; |
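| // Wide-character strings (charSize 2 or 4) use the same layout as std::string |
| // but with a per-character element size: charSize 2 selects the UTF-16 helpers |
| // above and charSize 4 the UTF-32 ones. The heap block stores a 32-bit |
| // character count followed by the encoded characters. |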
| var __embind_register_std_wstring = (rawType, charSize, name) => { |
| name = AsciiToString(name); |
| var decodeString, encodeString, readCharAt, lengthBytesUTF; |
| if (charSize === 2) { |
| decodeString = UTF16ToString; |
| encodeString = stringToUTF16; |
| lengthBytesUTF = lengthBytesUTF16; |
| readCharAt = (pointer) => HEAPU16[((pointer)>>1)]; |
| } else if (charSize === 4) { |
| decodeString = UTF32ToString; |
| encodeString = stringToUTF32; |
| lengthBytesUTF = lengthBytesUTF32; |
| readCharAt = (pointer) => HEAPU32[((pointer)>>2)]; |
| } |
| registerType(rawType, { |
| name, |
| 'fromWireType': (value) => { |
| // Code mostly taken from _embind_register_std_string fromWireType |
| var length = HEAPU32[((value)>>2)]; |
| var str; |
| |
| var decodeStartPtr = value + 4; |
| // Looping here to support possible embedded '0' bytes |
| for (var i = 0; i <= length; ++i) { |
| var currentBytePtr = value + 4 + i * charSize; |
| if (i == length || readCharAt(currentBytePtr) == 0) { |
| var maxReadBytes = currentBytePtr - decodeStartPtr; |
| var stringSegment = decodeString(decodeStartPtr, maxReadBytes); |
| if (str === undefined) { |
| str = stringSegment; |
| } else { |
| str += String.fromCharCode(0); |
| str += stringSegment; |
| } |
| decodeStartPtr = currentBytePtr + charSize; |
| } |
| } |
| |
| _free(value); |
| |
| return str; |
| }, |
| 'toWireType': (destructors, value) => { |
| if (!(typeof value == 'string')) { |
| throwBindingError(`Cannot pass non-string to C++ string type ${name}`); |
| } |
| |
| // assumes POINTER_SIZE alignment |
| var length = lengthBytesUTF(value); |
| var ptr = _malloc(4 + length + charSize); |
| HEAPU32[((ptr)>>2)] = length / charSize; |
| |
| encodeString(value, ptr + 4, length + charSize); |
| |
| if (destructors !== null) { |
| destructors.push(_free, ptr); |
| } |
| return ptr; |
| }, |
| argPackAdvance: GenericWireTypeSize, |
| 'readValueFromPointer': readPointer, |
| destructorFunction(ptr) { |
| _free(ptr); |
| } |
| }); |
| }; |
| |
| |
| |
| var __embind_register_value_object = ( |
| rawType, |
| name, |
| constructorSignature, |
| rawConstructor, |
| destructorSignature, |
| rawDestructor |
| ) => { |
| structRegistrations[rawType] = { |
| name: AsciiToString(name), |
| rawConstructor: embind__requireFunction(constructorSignature, rawConstructor), |
| rawDestructor: embind__requireFunction(destructorSignature, rawDestructor), |
| fields: [], |
| }; |
| }; |
| |
| |
| |
| var __embind_register_value_object_field = ( |
| structType, |
| fieldName, |
| getterReturnType, |
| getterSignature, |
| getter, |
| getterContext, |
| setterArgumentType, |
| setterSignature, |
| setter, |
| setterContext |
| ) => { |
| structRegistrations[structType].fields.push({ |
| fieldName: AsciiToString(fieldName), |
| getterReturnType, |
| getter: embind__requireFunction(getterSignature, getter), |
| getterContext, |
| setterArgumentType, |
| setter: embind__requireFunction(setterSignature, setter), |
| setterContext, |
| }); |
| }; |
| |
| |
| var __embind_register_void = (rawType, name) => { |
| name = AsciiToString(name); |
| registerType(rawType, { |
| isVoid: true, // void return values can be optimized out sometimes |
| name, |
| argPackAdvance: 0, |
| 'fromWireType': () => undefined, |
| // TODO: assert if anything else is given? |
| 'toWireType': (destructors, o) => undefined, |
| }); |
| }; |
| |
| var __emscripten_throw_longjmp = () => { |
| throw Infinity; |
| }; |
| |
| |
| |
| var emval_returnValue = (returnType, destructorsRef, handle) => { |
| var destructors = []; |
| var result = returnType['toWireType'](destructors, handle); |
| if (destructors.length) { |
| // void, primitives and any other types w/o destructors don't need to allocate a handle |
| HEAPU32[((destructorsRef)>>2)] = Emval.toHandle(destructors); |
| } |
| return result; |
| }; |
| var __emval_as = (handle, returnType, destructorsRef) => { |
| handle = Emval.toValue(handle); |
| returnType = requireRegisteredType(returnType, 'emval::as'); |
| return emval_returnValue(returnType, destructorsRef, handle); |
| }; |
| |
| var emval_methodCallers = []; |
| |
| var __emval_call = (caller, handle, destructorsRef, args) => { |
| caller = emval_methodCallers[caller]; |
| handle = Emval.toValue(handle); |
| return caller(null, handle, destructorsRef, args); |
| }; |
| |
| var emval_symbols = { |
| }; |
| |
| var getStringOrSymbol = (address) => { |
| var symbol = emval_symbols[address]; |
| if (symbol === undefined) { |
| return AsciiToString(address); |
| } |
| return symbol; |
| }; |
| |
| |
| var __emval_call_method = (caller, objHandle, methodName, destructorsRef, args) => { |
| caller = emval_methodCallers[caller]; |
| objHandle = Emval.toValue(objHandle); |
| methodName = getStringOrSymbol(methodName); |
| return caller(objHandle, objHandle[methodName], destructorsRef, args); |
| }; |
| |
| |
| |
| |
| var emval_get_global = () => globalThis; |
| var __emval_get_global = (name) => { |
| if (name===0) { |
| return Emval.toHandle(emval_get_global()); |
| } else { |
| name = getStringOrSymbol(name); |
| return Emval.toHandle(emval_get_global()[name]); |
| } |
| }; |
| |
| var emval_addMethodCaller = (caller) => { |
| var id = emval_methodCallers.length; |
| emval_methodCallers.push(caller); |
| return id; |
| }; |
| |
| var emval_lookupTypes = (argCount, argTypes) => { |
| var a = new Array(argCount); |
| for (var i = 0; i < argCount; ++i) { |
| a[i] = requireRegisteredType(HEAPU32[(((argTypes)+(i*4))>>2)], |
| `parameter ${i}`); |
| } |
| return a; |
| }; |
| |
| |
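| // Builds a specialized caller for invoking a JS function/method/constructor |
| // from C++: each argument is read from a packed buffer via its type's |
| // readValueFromPointer, advancing by argPackAdvance (8 bytes per slot), and |
| // the result is converted back through emval_returnValue. For a one-argument |
| // function call the generated body is roughly: |
| //   return function (obj, func, destructorsRef, args) { |
| //     var arg0 = argType0.readValueFromPointer(args); |
| //     var rv = func.call(obj, arg0); |
| //     return emval_returnValue(retType, destructorsRef, rv); |
| //   }; |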
| var __emval_get_method_caller = (argCount, argTypes, kind) => { |
| var types = emval_lookupTypes(argCount, argTypes); |
| var retType = types.shift(); |
| argCount--; // remove the shifted off return type |
| |
| var functionBody = |
| `return function (obj, func, destructorsRef, args) {\n`; |
| |
| var offset = 0; |
| var argsList = []; // 'obj?, arg0, arg1, arg2, ... , argN' |
| if (kind === /* FUNCTION */ 0) { |
| argsList.push('obj'); |
| } |
| var params = ['retType']; |
| var args = [retType]; |
| for (var i = 0; i < argCount; ++i) { |
| argsList.push(`arg${i}`); |
| params.push(`argType${i}`); |
| args.push(types[i]); |
| functionBody += |
| ` var arg${i} = argType${i}.readValueFromPointer(args${offset ? '+' + offset : ''});\n`; |
| offset += types[i].argPackAdvance; |
| } |
| var invoker = kind === /* CONSTRUCTOR */ 1 ? 'new func' : 'func.call'; |
| functionBody += |
| ` var rv = ${invoker}(${argsList.join(', ')});\n`; |
| if (!retType.isVoid) { |
| params.push('emval_returnValue'); |
| args.push(emval_returnValue); |
| functionBody += |
| ' return emval_returnValue(retType, destructorsRef, rv);\n'; |
| } |
| functionBody += |
| "};\n"; |
| |
| var invokerFunction = new Function(...params, functionBody)(...args); |
| var functionName = `methodCaller<(${types.map(t => t.name).join(', ')}) => ${retType.name}>`; |
| return emval_addMethodCaller(createNamedFunction(functionName, invokerFunction)); |
| }; |
| |
| |
| var __emval_get_module_property = (name) => { |
| name = getStringOrSymbol(name); |
| return Emval.toHandle(Module[name]); |
| }; |
| |
| var __emval_get_property = (handle, key) => { |
| handle = Emval.toValue(handle); |
| key = Emval.toValue(key); |
| return Emval.toHandle(handle[key]); |
| }; |
| |
| var __emval_incref = (handle) => { |
| if (handle > 9) { |
| emval_handles[handle + 1] += 1; |
| } |
| }; |
| |
| |
| var __emval_new_cstring = (v) => Emval.toHandle(getStringOrSymbol(v)); |
| |
| |
| |
| var __emval_run_destructors = (handle) => { |
| var destructors = Emval.toValue(handle); |
| runDestructors(destructors); |
| __emval_decref(handle); |
| }; |
| |
| |
| |
| |
| |
| |
| var INT53_MAX = 9007199254740992; |
| |
| var INT53_MIN = -9007199254740992; |
| var bigintToI53Checked = (num) => (num < INT53_MIN || num > INT53_MAX) ? NaN : Number(num); |
| function __mmap_js(len, prot, flags, fd, offset, allocated, addr) { |
| offset = bigintToI53Checked(offset); |
| |
| |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| var res = FS.mmap(stream, len, offset, prot, flags); |
| var ptr = res.ptr; |
| HEAP32[((allocated)>>2)] = res.allocated; |
| HEAPU32[((addr)>>2)] = ptr; |
| return 0; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
| |
| function __munmap_js(addr, len, prot, flags, fd, offset) { |
| offset = bigintToI53Checked(offset); |
| |
| |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| if (prot & 2) { |
| SYSCALLS.doMsync(addr, stream, len, flags, offset); |
| } |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return -e.errno; |
| } |
| } |
| |
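| // __tzset_js fills the C `timezone`, `daylight` and tzname-style buffers by |
| // probing Date.getTimezoneOffset() in January and July. Worked example: for a |
| // locale at UTC-5 standard time, getTimezoneOffset() returns 300, so 300*60 = |
| // 18000 seconds west of UTC is stored and the zone name becomes "UTC-0500". |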
| var __tzset_js = (timezone, daylight, std_name, dst_name) => { |
| // TODO: Use (malleable) environment variables instead of system settings. |
| var currentYear = new Date().getFullYear(); |
| var winter = new Date(currentYear, 0, 1); |
| var summer = new Date(currentYear, 6, 1); |
| var winterOffset = winter.getTimezoneOffset(); |
| var summerOffset = summer.getTimezoneOffset(); |
| |
| // Local standard timezone offset. Local standard time is not adjusted for |
| // daylight savings. This code uses the fact that getTimezoneOffset returns |
| // a greater value during Standard Time versus Daylight Saving Time (DST). |
| // Thus it determines the expected output during Standard Time, and it |
| // compares whether the output of the given date is the same (Standard) or less |
| // (DST). |
| var stdTimezoneOffset = Math.max(winterOffset, summerOffset); |
| |
| // timezone is specified as seconds west of UTC ("The external variable |
| // `timezone` shall be set to the difference, in seconds, between |
| // Coordinated Universal Time (UTC) and local standard time."), the same |
| // as returned by stdTimezoneOffset. |
| // See http://pubs.opengroup.org/onlinepubs/009695399/functions/tzset.html |
| HEAPU32[((timezone)>>2)] = stdTimezoneOffset * 60; |
| |
| HEAP32[((daylight)>>2)] = Number(winterOffset != summerOffset); |
| |
| var extractZone = (timezoneOffset) => { |
| // Why inverse sign? |
| // Read here https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/getTimezoneOffset |
| var sign = timezoneOffset >= 0 ? "-" : "+"; |
| |
| var absOffset = Math.abs(timezoneOffset); |
| var hours = String(Math.floor(absOffset / 60)).padStart(2, "0"); |
| var minutes = String(absOffset % 60).padStart(2, "0"); |
| |
| return `UTC${sign}${hours}${minutes}`; |
| }; |
| |
| var winterName = extractZone(winterOffset); |
| var summerName = extractZone(summerOffset); |
| if (summerOffset < winterOffset) { |
| // Northern hemisphere |
| stringToUTF8(winterName, std_name, 17); |
| stringToUTF8(summerName, dst_name, 17); |
| } else { |
| stringToUTF8(winterName, dst_name, 17); |
| stringToUTF8(summerName, std_name, 17); |
| } |
| }; |
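| // Illustrative example (editorial, not part of the generated runtime): for a |
| // zone at UTC-8 with DST at UTC-7, getTimezoneOffset() yields 480 minutes in |
| // winter and 420 in summer, so extractZone produces "UTC-0800" / "UTC-0700", |
| // and the smaller summer offset selects the northern-hemisphere branch above. |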
| |
| var _emscripten_get_now = () => performance.now(); |
| |
| var _emscripten_date_now = () => Date.now(); |
| |
| var nowIsMonotonic = 1; |
| |
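| // WASI clock ids: 0 = realtime, 1 = monotonic, 2 = process_cputime_id, |
| // 3 = thread_cputime_id. |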
| var checkWasiClock = (clock_id) => clock_id >= 0 && clock_id <= 3; |
| |
| function _clock_time_get(clk_id, ignored_precision, ptime) { |
| ignored_precision = bigintToI53Checked(ignored_precision); |
| |
| |
| if (!checkWasiClock(clk_id)) { |
| return 28; // EINVAL |
| } |
| var now; |
| // all wasi clocks but realtime are monotonic |
| if (clk_id === 0) { |
| now = _emscripten_date_now(); |
| } else if (nowIsMonotonic) { |
| now = _emscripten_get_now(); |
| } else { |
| return 52; // ENOSYS |
| } |
| // "now" is in ms, and wasi times are in ns. |
| var nsec = Math.round(now * 1000 * 1000); |
| HEAP64[((ptime)>>3)] = BigInt(nsec); |
| return 0; |
| } |
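| // Illustrative example (editorial): a monotonic reading of 1234.5 ms from |
| // performance.now() is stored at ptime as the BigInt 1234500000n (ns). |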
| |
| |
| var getHeapMax = () => |
| // Maximum heap size for this build (2GB). More generally the heap must stay |
| // below 4GB: while e.g. Chrome is able to allocate full 4GB Wasm memories, |
| // the size would wrap back to 0 bytes on the Wasm side for any code that |
| // deals with heap sizes, which would require special-casing 0 in all heap |
| // size related code. |
| 2147483648; |
| var _emscripten_get_heap_max = () => getHeapMax(); |
| |
| |
| |
| |
| var growMemory = (size) => { |
| var b = wasmMemory.buffer; |
| // Round the grow request up to the wasm page size (fixed at 64KB per spec) |
| // and convert it to a page delta, since .grow() takes a delta relative to |
| // the previous size. |
| var pages = ((size - b.byteLength + 65535) / 65536) | 0; |
| try { |
| wasmMemory.grow(pages); |
| updateMemoryViews(); |
| return 1 /*success*/; |
| } catch(e) { |
| } |
| // implicit 0 return to save code size (caller will cast "undefined" into 0 |
| // anyhow) |
| }; |
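| // Worked example (editorial, not part of the generated runtime): growing a |
| // 16MB buffer to a requested 20MB computes |
| // pages = ((20971520 - 16777216 + 65535) / 65536) | 0 = 64, i.e. the memory |
| // grows by 64 wasm pages (exactly 4MB). |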
| var _emscripten_resize_heap = (requestedSize) => { |
| var oldSize = HEAPU8.length; |
| // With CAN_ADDRESS_2GB or MEMORY64, pointers are already unsigned. |
| requestedSize >>>= 0; |
| // With multithreaded builds, races can happen (another thread might increase the size |
| // in between), so return a failure, and let the caller retry. |
| |
| // Memory resize rules: |
| // 1. Always increase the heap size to at least the requested size, rounded |
| // up to the next page multiple. |
| // 2a. If MEMORY_GROWTH_LINEAR_STEP == -1, over-reserve by resizing the heap |
| // geometrically: increase the heap size according to the |
| // MEMORY_GROWTH_GEOMETRIC_STEP factor (default +20%), over-reserving by |
| // at most MEMORY_GROWTH_GEOMETRIC_CAP bytes (default 96MB). |
| // 2b. If MEMORY_GROWTH_LINEAR_STEP != -1, over-reserve by resizing the heap |
| // linearly: increase the heap size by at least MEMORY_GROWTH_LINEAR_STEP |
| // bytes. |
| // 3. The maximum heap size is capped at 2048MB-WASM_PAGE_SIZE, or by |
| // MAXIMUM_MEMORY, or by the ASAN limit, whichever is smallest. |
| // 4. If an allocation fails, it may be because of the over-eager excess |
| // reservation from (2) above. In that case, cut down on the amount of |
| // excess growth and retry with a smaller allocation. |
| |
| // A limit is set for how much we can grow. We should not exceed that |
| // (the wasm binary specifies it, so if we tried, we'd fail anyhow). |
| var maxHeapSize = getHeapMax(); |
| if (requestedSize > maxHeapSize) { |
| return false; |
| } |
| |
| // Loop through potential heap size increases. If an overly eager |
| // reservation attempt fails, cut down the attempted size and reserve a |
| // smaller bump instead (at most 3 attempts, a somewhat arbitrary limit). |
| for (var cutDown = 1; cutDown <= 4; cutDown *= 2) { |
| var overGrownHeapSize = oldSize * (1 + 0.2 / cutDown); // ensure geometric growth |
| // but limit overreserving (default to capping at +96MB overgrowth at most) |
| overGrownHeapSize = Math.min(overGrownHeapSize, requestedSize + 100663296 ); |
| |
| var newSize = Math.min(maxHeapSize, alignMemory(Math.max(requestedSize, overGrownHeapSize), 65536)); |
| |
| var replacement = growMemory(newSize); |
| if (replacement) { |
| |
| return true; |
| } |
| } |
| return false; |
| }; |
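| // Worked example (editorial, not part of the generated runtime): with |
| // oldSize = 64MB and requestedSize = 65MB, the first iteration tries |
| // 64MB * 1.2 = 76.8MB (well under the requestedSize + 96MB overgrowth cap), |
| // rounded up to a 64KB page multiple; if that grow fails, later iterations |
| // retry with +10% and then +5% before giving up. |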
| |
| var ENV = { |
| }; |
| |
| var getExecutableName = () => thisProgram || './this.program'; |
| var getEnvStrings = () => { |
| if (!getEnvStrings.strings) { |
| // Default values. |
| // Browser language detection #8751 |
| var lang = ((typeof navigator == 'object' && navigator.language) || 'C').replace('-', '_') + '.UTF-8'; |
| var env = { |
| 'USER': 'web_user', |
| 'LOGNAME': 'web_user', |
| 'PATH': '/', |
| 'PWD': '/', |
| 'HOME': '/home/web_user', |
| 'LANG': lang, |
| '_': getExecutableName() |
| }; |
| // Apply the user-provided values, if any. |
| for (var x in ENV) { |
| // x is a key in ENV; if ENV[x] is undefined, that means it was |
| // explicitly set to be so. We allow user code to do that to |
| // force variables with default values to remain unset. |
| if (ENV[x] === undefined) delete env[x]; |
| else env[x] = ENV[x]; |
| } |
| var strings = []; |
| for (var x in env) { |
| strings.push(`${x}=${env[x]}`); |
| } |
| getEnvStrings.strings = strings; |
| } |
| return getEnvStrings.strings; |
| }; |
| |
| var _environ_get = (__environ, environ_buf) => { |
| var bufSize = 0; |
| var envp = 0; |
| for (var string of getEnvStrings()) { |
| var ptr = environ_buf + bufSize; |
| HEAPU32[(((__environ)+(envp))>>2)] = ptr; |
| bufSize += stringToUTF8(string, ptr, Infinity) + 1; |
| envp += 4; |
| } |
| return 0; |
| }; |
| |
| |
| var _environ_sizes_get = (penviron_count, penviron_buf_size) => { |
| var strings = getEnvStrings(); |
| HEAPU32[((penviron_count)>>2)] = strings.length; |
| var bufSize = 0; |
| for (var string of strings) { |
| bufSize += lengthBytesUTF8(string) + 1; |
| } |
| HEAPU32[((penviron_buf_size)>>2)] = bufSize; |
| return 0; |
| }; |
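| // Illustrative note (editorial): with the default environment above and an |
| // empty ENV, environ_sizes_get reports 7 strings plus the byte length of |
| // "USER=web_user\0LOGNAME=web_user\0..." etc., and environ_get then writes |
| // one 32-bit pointer per string into __environ and the NUL-terminated |
| // strings themselves into environ_buf. |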
| |
| function _fd_close(fd) { |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| FS.close(stream); |
| return 0; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return e.errno; |
| } |
| } |
| |
| /** @param {number=} offset */ |
| var doReadv = (stream, iov, iovcnt, offset) => { |
| var ret = 0; |
| for (var i = 0; i < iovcnt; i++) { |
| var ptr = HEAPU32[((iov)>>2)]; |
| var len = HEAPU32[(((iov)+(4))>>2)]; |
| iov += 8; |
| var curr = FS.read(stream, HEAP8, ptr, len, offset); |
| if (curr < 0) return -1; |
| ret += curr; |
| if (curr < len) break; // nothing more to read |
| if (typeof offset != 'undefined') { |
| offset += curr; |
| } |
| } |
| return ret; |
| }; |
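| // Editorial note: each iovec entry occupies 8 bytes of linear memory, a |
| // 32-bit buffer pointer followed by a 32-bit length, which is why the loop |
| // above (and the one in doWritev below) advances `iov` by 8 per entry. |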
| |
| function _fd_read(fd, iov, iovcnt, pnum) { |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| var num = doReadv(stream, iov, iovcnt); |
| HEAPU32[((pnum)>>2)] = num; |
| return 0; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return e.errno; |
| } |
| } |
| |
| |
| function _fd_seek(fd, offset, whence, newOffset) { |
| offset = bigintToI53Checked(offset); |
| |
| |
| try { |
| |
| if (isNaN(offset)) return 61; // EOVERFLOW |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| FS.llseek(stream, offset, whence); |
| HEAP64[((newOffset)>>3)] = BigInt(stream.position); |
| if (stream.getdents && offset === 0 && whence === 0) stream.getdents = null; // reset readdir state |
| return 0; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return e.errno; |
| } |
| } |
| |
| /** @param {number=} offset */ |
| var doWritev = (stream, iov, iovcnt, offset) => { |
| var ret = 0; |
| for (var i = 0; i < iovcnt; i++) { |
| var ptr = HEAPU32[((iov)>>2)]; |
| var len = HEAPU32[(((iov)+(4))>>2)]; |
| iov += 8; |
| var curr = FS.write(stream, HEAP8, ptr, len, offset); |
| if (curr < 0) return -1; |
| ret += curr; |
| if (curr < len) { |
| // No more space to write. |
| break; |
| } |
| if (typeof offset != 'undefined') { |
| offset += curr; |
| } |
| } |
| return ret; |
| }; |
| |
| function _fd_write(fd, iov, iovcnt, pnum) { |
| try { |
| |
| var stream = SYSCALLS.getStreamFromFD(fd); |
| var num = doWritev(stream, iov, iovcnt); |
| HEAPU32[((pnum)>>2)] = num; |
| return 0; |
| } catch (e) { |
| if (typeof FS == 'undefined' || !(e.name === 'ErrnoError')) throw e; |
| return e.errno; |
| } |
| } |
| |
| |
| FS.createPreloadedFile = FS_createPreloadedFile; |
| FS.staticInit(); |
| |
| // This error may happen quite a bit. To avoid overhead we reuse it (and |
| // suffer a lack of stack info). |
| MEMFS.doesNotExistError = new FS.ErrnoError(44); // ENOENT |
| /** @suppress {checkTypes} */ |
| MEMFS.doesNotExistError.stack = '<generic error, no stack>'; |
| init_ClassHandle(); |
| init_RegisteredPointer(); |
| // End JS library code |
| |
| // include: postlibrary.js |
| // This file is included after the automatically-generated JS library code |
| // but before the wasm module is created. |
| |
| { |
| |
| // Begin ATMODULES hooks |
| if (Module['noExitRuntime']) noExitRuntime = Module['noExitRuntime']; |
| if (Module['preloadPlugins']) preloadPlugins = Module['preloadPlugins']; |
| if (Module['print']) out = Module['print']; |
| if (Module['printErr']) err = Module['printErr']; |
| if (Module['wasmBinary']) wasmBinary = Module['wasmBinary']; |
| // End ATMODULES hooks |
| |
| if (Module['arguments']) arguments_ = Module['arguments']; |
| if (Module['thisProgram']) thisProgram = Module['thisProgram']; |
| |
| } |
| |
| // Begin runtime exports |
| // End runtime exports |
| // Begin JS library exports |
| // End JS library exports |
| |
| // end include: postlibrary.js |
| |
| |
| // Imports from the Wasm binary. |
| var ___getTypeName, |
| _free, |
| _malloc, |
| _emscripten_builtin_memalign, |
| _setThrew, |
| __emscripten_stack_restore, |
| __emscripten_stack_alloc, |
| _emscripten_stack_get_current; |
| |
| |
| function assignWasmExports(wasmExports) { |
| ___getTypeName = wasmExports['__getTypeName']; |
| _free = wasmExports['free']; |
| _malloc = wasmExports['malloc']; |
| _emscripten_builtin_memalign = wasmExports['emscripten_builtin_memalign']; |
| _setThrew = wasmExports['setThrew']; |
| __emscripten_stack_restore = wasmExports['_emscripten_stack_restore']; |
| __emscripten_stack_alloc = wasmExports['_emscripten_stack_alloc']; |
| _emscripten_stack_get_current = wasmExports['emscripten_stack_get_current']; |
| } |
| var wasmImports = { |
| /** @export */ |
| __cxa_throw: ___cxa_throw, |
| /** @export */ |
| __syscall_fcntl64: ___syscall_fcntl64, |
| /** @export */ |
| __syscall_fstat64: ___syscall_fstat64, |
| /** @export */ |
| __syscall_ioctl: ___syscall_ioctl, |
| /** @export */ |
| __syscall_lstat64: ___syscall_lstat64, |
| /** @export */ |
| __syscall_newfstatat: ___syscall_newfstatat, |
| /** @export */ |
| __syscall_openat: ___syscall_openat, |
| /** @export */ |
| __syscall_stat64: ___syscall_stat64, |
| /** @export */ |
| _abort_js: __abort_js, |
| /** @export */ |
| _embind_finalize_value_object: __embind_finalize_value_object, |
| /** @export */ |
| _embind_register_bigint: __embind_register_bigint, |
| /** @export */ |
| _embind_register_bool: __embind_register_bool, |
| /** @export */ |
| _embind_register_class: __embind_register_class, |
| /** @export */ |
| _embind_register_class_constructor: __embind_register_class_constructor, |
| /** @export */ |
| _embind_register_class_function: __embind_register_class_function, |
| /** @export */ |
| _embind_register_constant: __embind_register_constant, |
| /** @export */ |
| _embind_register_emval: __embind_register_emval, |
| /** @export */ |
| _embind_register_enum: __embind_register_enum, |
| /** @export */ |
| _embind_register_enum_value: __embind_register_enum_value, |
| /** @export */ |
| _embind_register_float: __embind_register_float, |
| /** @export */ |
| _embind_register_function: __embind_register_function, |
| /** @export */ |
| _embind_register_integer: __embind_register_integer, |
| /** @export */ |
| _embind_register_memory_view: __embind_register_memory_view, |
| /** @export */ |
| _embind_register_std_string: __embind_register_std_string, |
| /** @export */ |
| _embind_register_std_wstring: __embind_register_std_wstring, |
| /** @export */ |
| _embind_register_value_object: __embind_register_value_object, |
| /** @export */ |
| _embind_register_value_object_field: __embind_register_value_object_field, |
| /** @export */ |
| _embind_register_void: __embind_register_void, |
| /** @export */ |
| _emscripten_throw_longjmp: __emscripten_throw_longjmp, |
| /** @export */ |
| _emval_as: __emval_as, |
| /** @export */ |
| _emval_call: __emval_call, |
| /** @export */ |
| _emval_call_method: __emval_call_method, |
| /** @export */ |
| _emval_decref: __emval_decref, |
| /** @export */ |
| _emval_get_global: __emval_get_global, |
| /** @export */ |
| _emval_get_method_caller: __emval_get_method_caller, |
| /** @export */ |
| _emval_get_module_property: __emval_get_module_property, |
| /** @export */ |
| _emval_get_property: __emval_get_property, |
| /** @export */ |
| _emval_incref: __emval_incref, |
| /** @export */ |
| _emval_new_cstring: __emval_new_cstring, |
| /** @export */ |
| _emval_run_destructors: __emval_run_destructors, |
| /** @export */ |
| _mmap_js: __mmap_js, |
| /** @export */ |
| _munmap_js: __munmap_js, |
| /** @export */ |
| _tzset_js: __tzset_js, |
| /** @export */ |
| clock_time_get: _clock_time_get, |
| /** @export */ |
| emscripten_date_now: _emscripten_date_now, |
| /** @export */ |
| emscripten_get_heap_max: _emscripten_get_heap_max, |
| /** @export */ |
| emscripten_get_now: _emscripten_get_now, |
| /** @export */ |
| emscripten_resize_heap: _emscripten_resize_heap, |
| /** @export */ |
| environ_get: _environ_get, |
| /** @export */ |
| environ_sizes_get: _environ_sizes_get, |
| /** @export */ |
| fd_close: _fd_close, |
| /** @export */ |
| fd_read: _fd_read, |
| /** @export */ |
| fd_seek: _fd_seek, |
| /** @export */ |
| fd_write: _fd_write, |
| /** @export */ |
| invoke_ii, |
| /** @export */ |
| invoke_vi, |
| /** @export */ |
| invoke_vii, |
| /** @export */ |
| invoke_viii |
| }; |
| var wasmExports = await createWasm(); |
| |
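| // Editorial note: the invoke_* helpers below wrap indirect calls through the |
| // wasm table so that the wasm stack unwinds safely when something is thrown: |
| // the stack pointer is restored and, for the numeric values thrown by the |
| // longjmp support code, setThrew(1, 0) records the pending longjmp; anything |
| // non-numeric is re-thrown to the caller. |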
| function invoke_vi(index,a1) { |
| var sp = stackSave(); |
| try { |
| getWasmTableEntry(index)(a1); |
| } catch(e) { |
| stackRestore(sp); |
| if (e !== e+0) throw e; |
| _setThrew(1, 0); |
| } |
| } |
| |
| function invoke_viii(index,a1,a2,a3) { |
| var sp = stackSave(); |
| try { |
| getWasmTableEntry(index)(a1,a2,a3); |
| } catch(e) { |
| stackRestore(sp); |
| if (e !== e+0) throw e; |
| _setThrew(1, 0); |
| } |
| } |
| |
| function invoke_ii(index,a1) { |
| var sp = stackSave(); |
| try { |
| return getWasmTableEntry(index)(a1); |
| } catch(e) { |
| stackRestore(sp); |
| if (e !== e+0) throw e; |
| _setThrew(1, 0); |
| } |
| } |
| |
| function invoke_vii(index,a1,a2) { |
| var sp = stackSave(); |
| try { |
| getWasmTableEntry(index)(a1,a2); |
| } catch(e) { |
| stackRestore(sp); |
| if (e !== e+0) throw e; |
| _setThrew(1, 0); |
| } |
| } |
| |
| |
| // include: postamble.js |
| // === Auto-generated postamble setup entry stuff === |
| |
| function run() { |
| |
| if (runDependencies > 0) { |
| dependenciesFulfilled = run; |
| return; |
| } |
| |
| preRun(); |
| |
| // a preRun added a dependency, run will be called later |
| if (runDependencies > 0) { |
| dependenciesFulfilled = run; |
| return; |
| } |
| |
| function doRun() { |
| // run may have just been called through dependencies being fulfilled in |
| // this very frame, or while the async setStatus timeout below was pending. |
| Module['calledRun'] = true; |
| |
| if (ABORT) return; |
| |
| initRuntime(); |
| |
| readyPromiseResolve?.(Module); |
| Module['onRuntimeInitialized']?.(); |
| |
| postRun(); |
| } |
| |
| if (Module['setStatus']) { |
| Module['setStatus']('Running...'); |
| setTimeout(() => { |
| setTimeout(() => Module['setStatus'](''), 1); |
| doRun(); |
| }, 1); |
| } else |
| { |
| doRun(); |
| } |
| } |
| |
| function preInit() { |
| if (Module['preInit']) { |
| if (typeof Module['preInit'] == 'function') Module['preInit'] = [Module['preInit']]; |
| while (Module['preInit'].length > 0) { |
| Module['preInit'].shift()(); |
| } |
| } |
| } |
| |
| preInit(); |
| run(); |
| |
| // end include: postamble.js |
| |
| // include: postamble_modularize.js |
| // In MODULARIZE mode we wrap the generated code in a factory function |
| // and return either the Module itself, or a promise of the module. |
| // |
| // We assign to the `moduleRtn` global here and configure closure to see |
| // this as an extern so it won't get minified. |
| |
| if (runtimeInitialized) { |
| moduleRtn = Module; |
| } else { |
| // Set up the promise that indicates the Module is initialized |
| moduleRtn = new Promise((resolve, reject) => { |
| readyPromiseResolve = resolve; |
| readyPromiseReject = reject; |
| }); |
| } |
| |
| // end include: postamble_modularize.js |
| |
| |
| |
| return moduleRtn; |
| } |
| ); |
| })(); |
| if (typeof exports === 'object' && typeof module === 'object') { |
| module.exports = BASIS; |
| // This default export looks redundant, but it allows TS to import this |
| // commonjs style module. |
| module.exports.default = BASIS; |
| } else if (typeof define === 'function' && define['amd']) |
| define([], () => BASIS); |
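| // Usage sketch (editorial; names are from this file, the rest is an |
| // assumption about the host page): the returned factory is consumed as a |
| // promise, e.g. |
| // |
| //   BASIS().then((module) => { |
| //     // `module` is the initialized Module object; anything registered via |
| //     // embind is available as a property on it. |
| //   }); |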