function _mergeNamespaces(b, n) { for (var a = 0; a < n.length; a++) { const u = n[a]; if (typeof u != "string" && !Array.isArray(u)) { for (const c in u) if (c !== "default" && !(c in b)) { const f = Object.getOwnPropertyDescriptor(u, c); f && Object.defineProperty(b, c, f.get ? f : { enumerable: !0, get: () => u[c] }) } } } return Object.freeze(Object.defineProperty(b, Symbol.toStringTag, { value: "Module" })) }(function() { const n = document.createElement("link").relList; if (n && n.supports && n.supports("modulepreload")) return; for (const c of document.querySelectorAll('link[rel="modulepreload"]')) u(c); new MutationObserver(c => { for (const f of c) if (f.type === "childList") for (const s of f.addedNodes) s.tagName === "LINK" && s.rel === "modulepreload" && u(s) }).observe(document, { childList: !0, subtree: !0 }); function a(c) { const f = {}; return c.integrity && (f.integrity = c.integrity), c.referrerPolicy && (f.referrerPolicy = c.referrerPolicy), c.crossOrigin === "use-credentials" ? f.credentials = "include" : c.crossOrigin === "anonymous" ? f.credentials = "omit" : f.credentials = "same-origin", f } function u(c) { if (c.ep) return; c.ep = !0; const f = a(c); fetch(c.href, f) } })(); function dispatchCallback(b, n) { b && b(n) } function reverseDictionary(b) { return Object.fromEntries(Object.entries(b).map(([n, a]) => [a, n])) } const Callable = class { constructor() { let b = function(...n) { return b._call(...n) }; return Object.setPrototypeOf(b, new.target.prototype) } _call(...b) { throw Error("Must implement _call method in subclass") } }; function isTypedArray(b) { return b?.prototype?.__proto__?.constructor?.name === "TypedArray" } function isIntegralNumber(b) { return Number.isInteger(b) || typeof b == "bigint" } function exists(b) { return b != null } function calculateDimensions(b) { const n = []; let a = b; for (; Array.isArray(a);) n.push(a.length), a = a[0]; return n } function mergeArrays(...b) { return Array.prototype.concat.apply([], b) } function calculateReflectOffset(b, n) { return Math.abs((b + n) % (2 * n) - n) } const sharp = {}, ONNX_NODE = Object.freeze(Object.defineProperty({ __proto__: null, default: sharp }, Symbol.toStringTag, { value: "Module" })); function getDefaultExportFromCjs(b) { return b && b.__esModule && Object.prototype.hasOwnProperty.call(b, "default") ? b.default : b } function getAugmentedNamespace(b) { if (b.__esModule) return b; var n = b.default; if (typeof n == "function") { var a = function u() { return this instanceof u ? Reflect.construct(n, arguments, this.constructor) : n.apply(this, arguments) }; a.prototype = n.prototype } else a = {}; return Object.defineProperty(a, "__esModule", { value: !0 }), Object.keys(b).forEach(function(u) { var c = Object.getOwnPropertyDescriptor(b, u); Object.defineProperty(a, u, c.get ? 
c : { enumerable: !0, get: function() { return b[u] } }) }), a } var ortWeb_min$1 = { exports: {} }; const backends = {}, backendsSortedByPriority = [], registerBackend = (b, n, a) => { if (n && typeof n.init == "function" && typeof n.createSessionHandler == "function") { const u = backends[b]; if (u === void 0) backends[b] = { backend: n, priority: a }; else { if (u.priority > a) return; if (u.priority === a && u.backend !== n) throw new Error(`cannot register backend "${b}" using priority ${a}`) } if (a >= 0) { const c = backendsSortedByPriority.indexOf(b); c !== -1 && backendsSortedByPriority.splice(c, 1); for (let f = 0; f < backendsSortedByPriority.length; f++) if (backends[backendsSortedByPriority[f]].priority <= a) { backendsSortedByPriority.splice(f, 0, b); return } backendsSortedByPriority.push(b) } return } throw new TypeError("not a valid backend") }, resolveBackend = async b => { const n = b.length === 0 ? backendsSortedByPriority : b, a = []; for (const u of n) { const c = backends[u]; if (c) { if (c.initialized) return c.backend; if (c.aborted) continue; const f = !!c.initPromise; try { return f || (c.initPromise = c.backend.init()), await c.initPromise, c.initialized = !0, c.backend } catch (s) { f || a.push({ name: u, err: s }), c.aborted = !0 } finally { delete c.initPromise } } } throw new Error(`no available backend found. ERR: ${a.map(u=>`[${u.name}] ${u.err}`).join(", ")}`) }; class EnvImpl { constructor() { this.wasm = {}, this.webgl = {}, this.logLevelInternal = "warning" } set logLevel(n) { if (n !== void 0) { if (typeof n != "string" || ["verbose", "info", "warning", "error", "fatal"].indexOf(n) === -1) throw new Error(`Unsupported logging level: ${n}`); this.logLevelInternal = n } } get logLevel() { return this.logLevelInternal } } const env$2 = new EnvImpl, isBigInt64ArrayAvailable = typeof BigInt64Array < "u" && typeof BigInt64Array.from == "function", isBigUint64ArrayAvailable = typeof BigUint64Array < "u" && typeof BigUint64Array.from == "function", NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP = new Map([ ["float32", Float32Array], ["uint8", Uint8Array], ["int8", Int8Array], ["uint16", Uint16Array], ["int16", Int16Array], ["int32", Int32Array], ["bool", Uint8Array], ["float64", Float64Array], ["uint32", Uint32Array] ]), NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP = new Map([ [Float32Array, "float32"], [Uint8Array, "uint8"], [Int8Array, "int8"], [Uint16Array, "uint16"], [Int16Array, "int16"], [Int32Array, "int32"], [Float64Array, "float64"], [Uint32Array, "uint32"] ]); isBigInt64ArrayAvailable && (NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("int64", BigInt64Array), NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigInt64Array, "int64")); isBigUint64ArrayAvailable && (NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.set("uint64", BigUint64Array), NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.set(BigUint64Array, "uint64")); const calculateSize = b => { let n = 1; for (let a = 0; a < b.length; a++) { const u = b[a]; if (typeof u != "number" || !Number.isSafeInteger(u)) throw new TypeError(`dims[${a}] must be an integer, got: ${u}`); if (u < 0) throw new RangeError(`dims[${a}] must be a non-negative integer, got: ${u}`); n *= u } return n }; let Tensor$2 = class st { constructor(n, a, u) { let c, f, s; if (typeof n == "string") if (c = n, s = u, n === "string") { if (!Array.isArray(a)) throw new TypeError("A string tensor's data must be a string array."); f = a } else { const p = NUMERIC_TENSOR_TYPE_TO_TYPEDARRAY_MAP.get(n); if (p === void 0) throw new TypeError(`Unsupported tensor type: ${n}.`); if 
(Array.isArray(a)) f = p.from(a); else if (a instanceof p) f = a; else throw new TypeError(`A ${c} tensor's data must be type of ${p}`) } else if (s = a, Array.isArray(n)) { if (n.length === 0) throw new TypeError("Tensor type cannot be inferred from an empty array."); const p = typeof n[0]; if (p === "string") c = "string", f = n; else if (p === "boolean") c = "bool", f = Uint8Array.from(n); else throw new TypeError(`Invalid element type of data array: ${p}.`) } else { const p = NUMERIC_TENSOR_TYPEDARRAY_TO_TYPE_MAP.get(n.constructor); if (p === void 0) throw new TypeError(`Unsupported type for tensor data: ${n.constructor}.`); c = p, f = n } if (s === void 0) s = [f.length]; else if (!Array.isArray(s)) throw new TypeError("A tensor's dims must be a number array"); const h = calculateSize(s); if (h !== f.length) throw new Error(`Tensor's size(${h}) does not match data length(${f.length}).`); this.dims = s, this.type = c, this.data = f, this.size = h } static bufferToTensor(n, a) { if (n === void 0) throw new Error("Image buffer must be defined"); if (a.height === void 0 || a.width === void 0) throw new Error("Image height and width must be defined"); const { height: u, width: c } = a, f = a.norm; let s, h; f === void 0 || f.mean === void 0 ? s = 255 : s = f.mean, f === void 0 || f.bias === void 0 ? h = 0 : h = f.bias; const p = a.bitmapFormat !== void 0 ? a.bitmapFormat : "RGBA", l = a.tensorFormat !== void 0 && a.tensorFormat !== void 0 ? a.tensorFormat : "RGB", o = u * c, t = l === "RGBA" ? new Float32Array(o * 4) : new Float32Array(o * 3); let e = 4, r = 0, i = 1, d = 2, g = 3, m = 0, _ = o, y = o * 2, T = -1; p === "RGB" && (e = 3, r = 0, i = 1, d = 2, g = -1), l === "RGBA" ? T = o * 3 : l === "RBG" ? (m = 0, y = o, _ = o * 2) : l === "BGR" && (y = 0, _ = o, m = o * 2); for (let S = 0; S < o; S++, r += e, d += e, i += e, g += e) t[m++] = (n[r] + h) / s, t[_++] = (n[i] + h) / s, t[y++] = (n[d] + h) / s, T !== -1 && g !== -1 && (t[T++] = (n[g] + h) / s); return l === "RGBA" ? new st("float32", t, [1, 4, u, c]) : new st("float32", t, [1, 3, u, c]) } static async fromImage(n, a) { const u = typeof HTMLImageElement < "u" && n instanceof HTMLImageElement, c = typeof ImageData < "u" && n instanceof ImageData, f = typeof ImageBitmap < "u" && n instanceof ImageBitmap, s = typeof String < "u" && (n instanceof String || typeof n == "string"); let h, p = {}; if (u) { const l = document.createElement("canvas"), o = l.getContext("2d"); if (o != null) { let t = n.naturalHeight, e = n.naturalWidth; if (a !== void 0 && a.resizedHeight !== void 0 && a.resizedWidth !== void 0 && (t = a.resizedHeight, e = a.resizedWidth), a !== void 0) { if (p = a, a.tensorFormat !== void 0) throw new Error("Image input config format must be RGBA for HTMLImageElement"); if (p.tensorFormat = "RGBA", a.height !== void 0 && a.height !== t) throw new Error("Image input config height doesn't match HTMLImageElement height"); if (p.height = t, a.width !== void 0 && a.width !== e) throw new Error("Image input config width doesn't match HTMLImageElement width"); p.width = e } else p.tensorFormat = "RGBA", p.height = t, p.width = e; l.width = e, l.height = t, o.drawImage(n, 0, 0, e, t), h = o.getImageData(0, 0, e, t).data } else throw new Error("Can not access image data") } else if (c) { const l = "RGBA"; let o, t; if (a !== void 0 && a.resizedWidth !== void 0 && a.resizedHeight !== void 0 ? 
(o = a.resizedHeight, t = a.resizedWidth) : (o = n.height, t = n.width), a !== void 0) { if (p = a, a.bitmapFormat !== void 0 && a.bitmapFormat !== l) throw new Error("Image input config format must be RGBA for ImageData"); p.bitmapFormat = "RGBA" } else p.bitmapFormat = "RGBA"; if (p.height = o, p.width = t, a !== void 0) { const e = document.createElement("canvas"); e.width = t, e.height = o; const r = e.getContext("2d"); if (r != null) r.putImageData(n, 0, 0), h = r.getImageData(0, 0, t, o).data; else throw new Error("Can not access image data") } else h = n.data } else if (f) { if (a === void 0) throw new Error("Please provide image config with format for Imagebitmap"); if (a.bitmapFormat !== void 0) throw new Error("Image input config format must be defined for ImageBitmap"); const l = document.createElement("canvas").getContext("2d"); if (l != null) { const o = n.height, t = n.width; if (l.drawImage(n, 0, 0, t, o), h = l.getImageData(0, 0, t, o).data, a !== void 0) { if (a.height !== void 0 && a.height !== o) throw new Error("Image input config height doesn't match ImageBitmap height"); if (p.height = o, a.width !== void 0 && a.width !== t) throw new Error("Image input config width doesn't match ImageBitmap width"); p.width = t } else p.height = o, p.width = t; return st.bufferToTensor(h, p) } else throw new Error("Can not access image data") } else { if (s) return new Promise((l, o) => { const t = document.createElement("canvas"), e = t.getContext("2d"); if (!n || !e) return o(); const r = new Image; r.crossOrigin = "Anonymous", r.src = n, r.onload = () => { t.width = r.width, t.height = r.height, e.drawImage(r, 0, 0, t.width, t.height); const i = e.getImageData(0, 0, t.width, t.height); if (a !== void 0) { if (a.height !== void 0 && a.height !== t.height) throw new Error("Image input config height doesn't match ImageBitmap height"); if (p.height = t.height, a.width !== void 0 && a.width !== t.width) throw new Error("Image input config width doesn't match ImageBitmap width"); p.width = t.width } else p.height = t.height, p.width = t.width; l(st.bufferToTensor(i.data, p)) } }); throw new Error("Input data provided is not supported - aborted tensor creation") } if (h !== void 0) return st.bufferToTensor(h, p); throw new Error("Input data provided is not supported - aborted tensor creation") } toImageData(n) { var a, u; const c = document.createElement("canvas").getContext("2d"); let f; if (c != null) { const s = this.dims[3], h = this.dims[2], p = this.dims[1], l = n !== void 0 && n.format !== void 0 ? n.format : "RGB", o = n !== void 0 && ((a = n.norm) === null || a === void 0 ? void 0 : a.mean) !== void 0 ? n.norm.mean : 255, t = n !== void 0 && ((u = n.norm) === null || u === void 0 ? void 0 : u.bias) !== void 0 ? n.norm.bias : 0, e = h * s; if (n !== void 0) { if (n.height !== void 0 && n.height !== h) throw new Error("Image output config height doesn't match tensor height"); if (n.width !== void 0 && n.width !== s) throw new Error("Image output config width doesn't match tensor width"); if (n.format !== void 0 && p === 4 && n.format !== "RGBA" || p === 3 && n.format !== "RGB" && n.format !== "BGR") throw new Error("Tensor format doesn't match input tensor dims") } const r = 4; let i = 0, d = 1, g = 2, m = 3, _ = 0, y = e, T = e * 2, w = -1; l === "RGBA" ? (_ = 0, y = e, T = e * 2, w = e * 3) : l === "RGB" ? 
(_ = 0, y = e, T = e * 2) : l === "RBG" && (_ = 0, T = e, y = e * 2), f = c.createImageData(s, h); for (let S = 0; S < h * s; i += r, d += r, g += r, m += r, S++) f.data[i] = (this.data[_++] - t) * o, f.data[d] = (this.data[y++] - t) * o, f.data[g] = (this.data[T++] - t) * o, f.data[m] = w === -1 ? 255 : (this.data[w++] - t) * o } else throw new Error("Can not access image data"); return f } reshape(n) { return new st(this.type, this.data, n) } }; const Tensor$1 = Tensor$2; let InferenceSession$2 = class ln { constructor(n) { this.handler = n } async run(n, a, u) { const c = {}; let f = {}; if (typeof n != "object" || n === null || n instanceof Tensor$1 || Array.isArray(n)) throw new TypeError("'feeds' must be an object that use input names as keys and OnnxValue as corresponding values."); let s = !0; if (typeof a == "object") { if (a === null) throw new TypeError("Unexpected argument[1]: cannot be null."); if (a instanceof Tensor$1) throw new TypeError("'fetches' cannot be a Tensor"); if (Array.isArray(a)) { if (a.length === 0) throw new TypeError("'fetches' cannot be an empty array."); s = !1; for (const l of a) { if (typeof l != "string") throw new TypeError("'fetches' must be a string array or an object."); if (this.outputNames.indexOf(l) === -1) throw new RangeError(`'fetches' contains invalid output name: ${l}.`); c[l] = null } if (typeof u == "object" && u !== null) f = u; else if (typeof u < "u") throw new TypeError("'options' must be an object.") } else { let l = !1; const o = Object.getOwnPropertyNames(a); for (const t of this.outputNames) if (o.indexOf(t) !== -1) { const e = a[t]; (e === null || e instanceof Tensor$1) && (l = !0, s = !1, c[t] = e) } if (l) { if (typeof u == "object" && u !== null) f = u; else if (typeof u < "u") throw new TypeError("'options' must be an object.") } else f = a } } else if (typeof a < "u") throw new TypeError("Unexpected argument[1]: must be 'fetches' or 'options'."); for (const l of this.inputNames) if (typeof n[l] > "u") throw new Error(`input '${l}' is missing in 'feeds'.`); if (s) for (const l of this.outputNames) c[l] = null; const h = await this.handler.run(n, c, f), p = {}; for (const l in h) Object.hasOwnProperty.call(h, l) && (p[l] = new Tensor$1(h[l].type, h[l].data, h[l].dims)); return p } static async create(n, a, u, c) { let f, s = {}; if (typeof n == "string") { if (f = n, typeof a == "object" && a !== null) s = a; else if (typeof a < "u") throw new TypeError("'options' must be an object.") } else if (n instanceof Uint8Array) { if (f = n, typeof a == "object" && a !== null) s = a; else if (typeof a < "u") throw new TypeError("'options' must be an object.") } else if (n instanceof ArrayBuffer || typeof SharedArrayBuffer < "u" && n instanceof SharedArrayBuffer) { const t = n; let e = 0, r = n.byteLength; if (typeof a == "object" && a !== null) s = a; else if (typeof a == "number") { if (e = a, !Number.isSafeInteger(e)) throw new RangeError("'byteOffset' must be an integer."); if (e < 0 || e >= t.byteLength) throw new RangeError(`'byteOffset' is out of range [0, ${t.byteLength}).`); if (r = n.byteLength - e, typeof u == "number") { if (r = u, !Number.isSafeInteger(r)) throw new RangeError("'byteLength' must be an integer."); if (r <= 0 || e + r > t.byteLength) throw new RangeError(`'byteLength' is out of range (0, ${t.byteLength-e}].`); if (typeof c == "object" && c !== null) s = c; else if (typeof c < "u") throw new TypeError("'options' must be an object.") } else if (typeof u < "u") throw new TypeError("'byteLength' must be a 
number.") } else if (typeof a < "u") throw new TypeError("'options' must be an object."); f = new Uint8Array(t, e, r) } else throw new TypeError("Unexpected argument[0]: must be 'path' or 'buffer'."); const p = (s.executionProviders || []).map(t => typeof t == "string" ? t : t.name), o = await (await resolveBackend(p)).createSessionHandler(f, s); return new ln(o) } startProfiling() { this.handler.startProfiling() } endProfiling() { this.handler.endProfiling() } get inputNames() { return this.handler.inputNames } get outputNames() { return this.handler.outputNames } }; const InferenceSession$1 = InferenceSession$2, lib = Object.freeze(Object.defineProperty({ __proto__: null, InferenceSession: InferenceSession$1, Tensor: Tensor$1, env: env$2, registerBackend }, Symbol.toStringTag, { value: "Module" })), require$$0 = getAugmentedNamespace(lib); /*! * ONNX Runtime Web v1.14.0 * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. */ (function(module, exports) { (function(b, n) { module.exports = n(require$$0) })(self, __WEBPACK_EXTERNAL_MODULE__1670__ => (() => { var __webpack_modules__ = { 3474: (b, n, a) => { var u, c = (u = (u = typeof document < "u" && document.currentScript ? document.currentScript.src : void 0) || "/index.js", function(f) { function s() { return z.buffer != J && Me(z.buffer), ue } function h() { return z.buffer != J && Me(z.buffer), Se } function p() { return z.buffer != J && Me(z.buffer), Te } function l() { return z.buffer != J && Me(z.buffer), se } function o() { return z.buffer != J && Me(z.buffer), ye } var t, e, r; f = f || {}, t || (t = f !== void 0 ? f : {}), t.ready = new Promise(function(x, A) { e = x, r = A }); var i, d, g, m, _, y, T = Object.assign({}, t), w = "./this.program", S = (x, A) => { throw A }, O = typeof window == "object", E = typeof importScripts == "function", v = typeof process == "object" && typeof process.versions == "object" && typeof process.versions.node == "string", P = t.ENVIRONMENT_IS_PTHREAD || !1, L = ""; function V(x) { return t.locateFile ? t.locateFile(x, L) : L + x } if (v) { let x; L = E ? a(908).dirname(L) + "/" : "//", y = () => { _ || (m = a(1384), _ = a(908)) }, i = function(A, I) { return y(), A = _.normalize(A), m.readFileSync(A, I ? void 0 : "utf8") }, g = A => ((A = i(A, !0)).buffer || (A = new Uint8Array(A)), A), d = (A, I, F) => { y(), A = _.normalize(A), m.readFile(A, function(B, G) { B ? F(B) : I(G.buffer) }) }, 1 < process.argv.length && (w = process.argv[1].replace(/\\/g, "/")), process.argv.slice(2), process.on("uncaughtException", function(A) { if (!(A instanceof Qe)) throw A }), process.on("unhandledRejection", function(A) { throw A }), S = (A, I) => { if (Ge()) throw process.exitCode = A, I; I instanceof Qe || $("exiting due to exception: " + I), process.exit(A) }, t.inspect = function() { return "[Emscripten Module object]" }; try { x = a(9925) } catch (A) { throw console.error('The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?'), A } a.g.Worker = x.Worker } else(O || E) && (E ? L = self.location.href : typeof document < "u" && document.currentScript && (L = document.currentScript.src), u && (L = u), L = L.indexOf("blob:") !== 0 ? 
L.substr(0, L.replace(/[?#].*/, "").lastIndexOf("/") + 1) : "", v || (i = x => { var A = new XMLHttpRequest; return A.open("GET", x, !1), A.send(null), A.responseText }, E && (g = x => { var A = new XMLHttpRequest; return A.open("GET", x, !1), A.responseType = "arraybuffer", A.send(null), new Uint8Array(A.response) }), d = (x, A, I) => { var F = new XMLHttpRequest; F.open("GET", x, !0), F.responseType = "arraybuffer", F.onload = () => { F.status == 200 || F.status == 0 && F.response ? A(F.response) : I() }, F.onerror = I, F.send(null) })); v && typeof performance > "u" && (a.g.performance = a(6953).performance); var R = console.log.bind(console), k = console.warn.bind(console); v && (y(), R = x => m.writeSync(1, x + ` `), k = x => m.writeSync(2, x + ` `)); var Y, C = t.print || R, $ = t.printErr || k; Object.assign(t, T), T = null, t.thisProgram && (w = t.thisProgram), t.quit && (S = t.quit), t.wasmBinary && (Y = t.wasmBinary); var X = t.noExitRuntime || !1; typeof WebAssembly != "object" && pe("no native wasm support detected"); var z, Z, J, ue, Se, Te, se, ye, be = !1, Ie = typeof TextDecoder < "u" ? new TextDecoder("utf8") : void 0; function Le(x, A, I) { var F = (A >>>= 0) + I; for (I = A; x[I] && !(I >= F);) ++I; if (16 < I - A && x.buffer && Ie) return Ie.decode(x.buffer instanceof SharedArrayBuffer ? x.slice(A, I) : x.subarray(A, I)); for (F = ""; A < I;) { var B = x[A++]; if (128 & B) { var G = 63 & x[A++]; if ((224 & B) == 192) F += String.fromCharCode((31 & B) << 6 | G); else { var Q = 63 & x[A++]; 65536 > (B = (240 & B) == 224 ? (15 & B) << 12 | G << 6 | Q : (7 & B) << 18 | G << 12 | Q << 6 | 63 & x[A++]) ? F += String.fromCharCode(B) : (B -= 65536, F += String.fromCharCode(55296 | B >> 10, 56320 | 1023 & B)) } } else F += String.fromCharCode(B) } return F } function ve(x, A) { return (x >>>= 0) ? Le(h(), x, A) : "" } function Ne(x, A, I, F) { if (!(0 < F)) return 0; var B = I >>>= 0; F = I + F - 1; for (var G = 0; G < x.length; ++G) { var Q = x.charCodeAt(G); if (55296 <= Q && 57343 >= Q && (Q = 65536 + ((1023 & Q) << 10) | 1023 & x.charCodeAt(++G)), 127 >= Q) { if (I >= F) break; A[I++ >>> 0] = Q } else { if (2047 >= Q) { if (I + 1 >= F) break; A[I++ >>> 0] = 192 | Q >> 6 } else { if (65535 >= Q) { if (I + 2 >= F) break; A[I++ >>> 0] = 224 | Q >> 12 } else { if (I + 3 >= F) break; A[I++ >>> 0] = 240 | Q >> 18, A[I++ >>> 0] = 128 | Q >> 12 & 63 } A[I++ >>> 0] = 128 | Q >> 6 & 63 } A[I++ >>> 0] = 128 | 63 & Q } } return A[I >>> 0] = 0, I - B } function Fe(x) { for (var A = 0, I = 0; I < x.length; ++I) { var F = x.charCodeAt(I); 127 >= F ? A++ : 2047 >= F ? A += 2 : 55296 <= F && 57343 >= F ? 
(A += 4, ++I) : A += 3 } return A } function Me(x) { J = x, t.HEAP8 = ue = new Int8Array(x), t.HEAP16 = new Int16Array(x), t.HEAP32 = Te = new Int32Array(x), t.HEAPU8 = Se = new Uint8Array(x), t.HEAPU16 = new Uint16Array(x), t.HEAPU32 = se = new Uint32Array(x), t.HEAPF32 = new Float32Array(x), t.HEAPF64 = ye = new Float64Array(x) } P && (J = t.buffer); var Oe = t.INITIAL_MEMORY || 16777216; if (P) z = t.wasmMemory, J = t.buffer; else if (t.wasmMemory) z = t.wasmMemory; else if (!((z = new WebAssembly.Memory({ initial: Oe / 65536, maximum: 65536, shared: !0 })).buffer instanceof SharedArrayBuffer)) throw $("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"), v && console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"), Error("bad memory"); z && (J = z.buffer), Oe = J.byteLength, Me(J); var Be, Ue = [], ze = [], He = [], Ke = []; function Ge() { return X || !1 } function Ve() { var x = t.preRun.shift(); Ue.unshift(x) } var Ae, Re = 0, Ye = null; function pe(x) { throw P ? postMessage({ cmd: "onAbort", arg: x }) : t.onAbort && t.onAbort(x), $(x = "Aborted(" + x + ")"), be = !0, x = new WebAssembly.RuntimeError(x + ". Build with -sASSERTIONS for more info."), r(x), x } function dt() { return Ae.startsWith("data:application/octet-stream;base64,") } function at() { var x = Ae; try { if (x == Ae && Y) return new Uint8Array(Y); if (g) return g(x); throw "both async and sync fetching of the wasm failed" } catch (A) { pe(A) } } Ae = "ort-wasm-threaded.wasm", dt() || (Ae = V(Ae)); var Et = {}; function Qe(x) { this.name = "ExitStatus", this.message = "Program terminated with exit(" + x + ")", this.status = x } function ut(x) { (x = re.Vb[x]) || pe(), re.mc(x) } function lt(x) { var A = re.Cc(); if (!A) return 6; re.ac.push(A), re.Vb[x.Ub] = A, A.Ub = x.Ub; var I = { cmd: "run", start_routine: x.Ic, arg: x.zc, pthread_ptr: x.Ub }; return A.$b = () => { I.time = performance.now(), A.postMessage(I, x.Nc) }, A.loaded && (A.$b(), delete A.$b), 0 } function $e(x) { if (P) return ee(1, 1, x); Ge() || (re.oc(), t.onExit && t.onExit(x), be = !0), S(x, new Qe(x)) } function nt(x, A) { if (!A && P) throw Mt(x), "unwind"; Ge() || P || (Gt(), tt(He), zt(0), Ft[1].length && kt(1, 10), Ft[2].length && kt(2, 10), re.oc()), $e(x) } var re = { Yb: [], ac: [], qc: [], Vb: {}, fc: function() { P && re.Ec() }, Pc: function() {}, Ec: function() { re.receiveObjectTransfer = re.Gc, re.threadInitTLS = re.pc, re.setExitStatus = re.nc, X = !1 }, nc: function() {}, oc: function() { for (var x of Object.values(re.Vb)) re.mc(x); for (x of re.Yb) x.terminate(); re.Yb = [] }, mc: function(x) { var A = x.Ub; delete re.Vb[A], re.Yb.push(x), re.ac.splice(re.ac.indexOf(x), 1), x.Ub = 0, Nt(A) }, Gc: function() {}, pc: function() { re.qc.forEach(x => x()) }, Fc: function(x, A) { x.onmessage = I => { var F = (I = I.data).cmd; if (x.Ub && (re.Bc = x.Ub), I.targetThread && I.targetThread != It()) { var B = re.Vb[I.Qc]; B ? B.postMessage(I, I.transferList) : $('Internal error! Worker sent a message "' + F + '" to target pthread ' + I.targetThread + ", but that thread no longer exists!") } else F === "processProxyingQueue" ? N(I.queue) : F === "spawnThread" ? lt(I) : F === "cleanupThread" ? ut(I.thread) : F === "killThread" ? 
(I = I.thread, F = re.Vb[I], delete re.Vb[I], F.terminate(), Nt(I), re.ac.splice(re.ac.indexOf(F), 1), F.Ub = 0) : F === "cancelThread" ? re.Vb[I.thread].postMessage({ cmd: "cancel" }) : F === "loaded" ? (x.loaded = !0, A && A(x), x.$b && (x.$b(), delete x.$b)) : F === "print" ? C("Thread " + I.threadId + ": " + I.text) : F === "printErr" ? $("Thread " + I.threadId + ": " + I.text) : F === "alert" ? alert("Thread " + I.threadId + ": " + I.text) : I.target === "setimmediate" ? x.postMessage(I) : F === "onAbort" ? t.onAbort && t.onAbort(I.arg) : F && $("worker sent an unknown command " + F); re.Bc = void 0 }, x.onerror = I => { throw $("worker sent an error! " + I.filename + ":" + I.lineno + ": " + I.message), I }, v && (x.on("message", function(I) { x.onmessage({ data: I }) }), x.on("error", function(I) { x.onerror(I) }), x.on("detachedExit", function() {})), x.postMessage({ cmd: "load", urlOrBlob: t.mainScriptUrlOrBlob || u, wasmMemory: z, wasmModule: Z }) }, yc: function() { var x = V("ort-wasm-threaded.worker.js"); re.Yb.push(new Worker(x)) }, Cc: function() { return re.Yb.length == 0 && (re.yc(), re.Fc(re.Yb[0])), re.Yb.pop() } }; function tt(x) { for (; 0 < x.length;) x.shift()(t) } function At(x) { var A = de(); return x = x(), ce(A), x } function Mt(x) { if (P) return ee(2, 0, x); try { nt(x) } catch (A) { A instanceof Qe || A == "unwind" || S(1, A) } } t.PThread = re, t.establishStackSpace = function() { var x = It(), A = p()[x + 44 >> 2 >>> 0]; x = p()[x + 48 >> 2 >>> 0], Xt(A, A - x), ce(A) }; var Je = []; function we(x) { var A = Je[x]; return A || (x >= Je.length && (Je.length = x + 1), Je[x] = A = Be.get(x)), A } t.invokeEntryPoint = function(x, A) { x = we(x)(A), Ge() ? re.nc(x) : Yt(x) }; var rt, ht, it = [], ae = 0, ie = 0; function oe(x) { this.Zb = x, this.Sb = x - 24, this.xc = function(A) { l()[this.Sb + 4 >> 2 >>> 0] = A }, this.bc = function() { return l()[this.Sb + 4 >> 2 >>> 0] }, this.wc = function(A) { l()[this.Sb + 8 >> 2 >>> 0] = A }, this.Dc = function() { return l()[this.Sb + 8 >> 2 >>> 0] }, this.rc = function() { p()[this.Sb >> 2 >>> 0] = 0 }, this.hc = function(A) { A = A ? 1 : 0, s()[this.Sb + 12 >> 0 >>> 0] = A }, this.uc = function() { return s()[this.Sb + 12 >> 0 >>> 0] != 0 }, this.ic = function(A) { A = A ? 1 : 0, s()[this.Sb + 13 >> 0 >>> 0] = A }, this.kc = function() { return s()[this.Sb + 13 >> 0 >>> 0] != 0 }, this.fc = function(A, I) { this.cc(0), this.xc(A), this.wc(I), this.rc(), this.hc(!1), this.ic(!1) }, this.sc = function() { Atomics.add(p(), this.Sb >> 2, 1) }, this.Hc = function() { return Atomics.sub(p(), this.Sb >> 2, 1) === 1 }, this.cc = function(A) { l()[this.Sb + 16 >> 2 >>> 0] = A }, this.tc = function() { return l()[this.Sb + 16 >> 2 >>> 0] }, this.vc = function() { if (Kt(this.bc())) return l()[this.Zb >> 2 >>> 0]; var A = this.tc(); return A !== 0 ? A : this.Zb } } function ft(x) { return Ut(new oe(x).Sb) } function ot(x, A, I, F) { return P ? ee(3, 1, x, A, I, F) : pt(x, A, I, F) } function pt(x, A, I, F) { if (typeof SharedArrayBuffer > "u") return $("Current environment does not support SharedArrayBuffer, pthreads are not available!"), 6; var B = []; return P && B.length === 0 ? ot(x, A, I, F) : (x = { Ic: I, Ub: x, zc: F, Nc: B }, P ? (x.Oc = "spawnThread", postMessage(x, B), 0) : lt(x)) } function gt(x, A, I) { return P ? 
ee(4, 1, x, A, I) : 0 } function mt(x, A) { if (P) return ee(5, 1, x, A) } function bt(x, A) { if (P) return ee(6, 1, x, A) } function _t(x, A, I) { if (P) return ee(7, 1, x, A, I) } function yt(x, A, I) { return P ? ee(8, 1, x, A, I) : 0 } function wt(x, A) { if (P) return ee(9, 1, x, A) } function Tt(x, A, I) { if (P) return ee(10, 1, x, A, I) } function vt(x, A, I, F) { if (P) return ee(11, 1, x, A, I, F) } function xt(x, A, I, F) { if (P) return ee(12, 1, x, A, I, F) } function St(x, A, I, F) { if (P) return ee(13, 1, x, A, I, F) } function Ot(x) { if (P) return ee(14, 1, x) } function M(x, A) { if (P) return ee(15, 1, x, A) } function D(x, A, I) { if (P) return ee(16, 1, x, A, I) } function N(x) { Atomics.store(p(), x >> 2, 1), It() && Ht(x), Atomics.compareExchange(p(), x >> 2, 1, 0) } function j(x) { return l()[x >>> 2] + 4294967296 * p()[x + 4 >>> 2] } function U(x, A, I, F, B, G) { return P ? ee(17, 1, x, A, I, F, B, G) : -52 } function H(x, A, I, F, B, G) { if (P) return ee(18, 1, x, A, I, F, B, G) } function K(x) { var A = Fe(x) + 1, I = Lt(A); return I && Ne(x, s(), I, A), I } function te(x, A, I) { function F(ge) { return (ge = ge.toTimeString().match(/\(([A-Za-z ]+)\)$/)) ? ge[1] : "GMT" } if (P) return ee(19, 1, x, A, I); var B = new Date().getFullYear(), G = new Date(B, 0, 1), Q = new Date(B, 6, 1); B = G.getTimezoneOffset(); var ne = Q.getTimezoneOffset(), fe = Math.max(B, ne); p()[x >> 2 >>> 0] = 60 * fe, p()[A >> 2 >>> 0] = +(B != ne), x = F(G), A = F(Q), x = K(x), A = K(A), ne < B ? (l()[I >> 2 >>> 0] = x, l()[I + 4 >> 2 >>> 0] = A) : (l()[I >> 2 >>> 0] = A, l()[I + 4 >> 2 >>> 0] = x) } function ee(x, A) { var I = arguments.length - 2, F = arguments; return At(() => { for (var B = $t(8 * I), G = B >> 3, Q = 0; Q < I; Q++) { var ne = F[2 + Q]; o()[G + Q >>> 0] = ne } return Wt(x, I, B, A) }) } t.executeNotifiedProxyingQueue = N, ht = v ? () => { var x = process.hrtime(); return 1e3 * x[0] + x[1] / 1e6 } : P ? () => performance.now() - t.__performance_now_clock_drift : () => performance.now(); var le, xe = [], ke = {}; function Ce() { if (!le) { var x, A = { USER: "web_user", LOGNAME: "web_user", PATH: "/", PWD: "/", HOME: "/home/web_user", LANG: (typeof navigator == "object" && navigator.languages && navigator.languages[0] || "C").replace("-", "_") + ".UTF-8", _: w || "./this.program" }; for (x in ke) ke[x] === void 0 ? delete A[x] : A[x] = ke[x]; var I = []; for (x in A) I.push(x + "=" + A[x]); le = I } return le } function q(x, A) { if (P) return ee(20, 1, x, A); var I = 0; return Ce().forEach(function(F, B) { var G = A + I; for (B = l()[x + 4 * B >> 2 >>> 0] = G, G = 0; G < F.length; ++G) s()[B++ >> 0 >>> 0] = F.charCodeAt(G); s()[B >> 0 >>> 0] = 0, I += F.length + 1 }), 0 } function me(x, A) { if (P) return ee(21, 1, x, A); var I = Ce(); l()[x >> 2 >>> 0] = I.length; var F = 0; return I.forEach(function(B) { F += B.length + 1 }), l()[A >> 2 >>> 0] = F, 0 } function Ee(x) { return P ? ee(22, 1, x) : 52 } function qe(x, A, I, F) { return P ? ee(23, 1, x, A, I, F) : 52 } function Ze(x, A, I, F, B) { return P ? ee(24, 1, x, A, I, F, B) : 70 } var Ft = [null, [], [] ]; function kt(x, A) { var I = Ft[x]; A === 0 || A === 10 ? ((x === 1 ? 
C : $)(Le(I, 0)), I.length = 0) : I.push(A) } function Rt(x, A, I, F) { if (P) return ee(25, 1, x, A, I, F); for (var B = 0, G = 0; G < I; G++) { var Q = l()[A >> 2 >>> 0], ne = l()[A + 4 >> 2 >>> 0]; A += 8; for (var fe = 0; fe < ne; fe++) kt(x, h()[Q + fe >>> 0]); B += ne } return l()[F >> 2 >>> 0] = B, 0 } var je = 0; function Pt(x) { return x % 4 == 0 && (x % 100 != 0 || x % 400 == 0) } var jt = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], Bt = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; function Vt(x, A, I, F) { function B(W, _e, Pe) { for (W = typeof W == "number" ? W.toString() : W || ""; W.length < _e;) W = Pe[0] + W; return W } function G(W, _e) { return B(W, _e, "0") } function Q(W, _e) { function Pe(ct) { return 0 > ct ? -1 : 0 < ct ? 1 : 0 } var et; return (et = Pe(W.getFullYear() - _e.getFullYear())) === 0 && (et = Pe(W.getMonth() - _e.getMonth())) === 0 && (et = Pe(W.getDate() - _e.getDate())), et } function ne(W) { switch (W.getDay()) { case 0: return new Date(W.getFullYear() - 1, 11, 29); case 1: return W; case 2: return new Date(W.getFullYear(), 0, 3); case 3: return new Date(W.getFullYear(), 0, 2); case 4: return new Date(W.getFullYear(), 0, 1); case 5: return new Date(W.getFullYear() - 1, 11, 31); case 6: return new Date(W.getFullYear() - 1, 11, 30) } } function fe(W) { var _e = W.Wb; for (W = new Date(new Date(W.Xb + 1900, 0, 1).getTime()); 0 < _e;) { var Pe = W.getMonth(), et = (Pt(W.getFullYear()) ? jt : Bt)[Pe]; if (!(_e > et - W.getDate())) { W.setDate(W.getDate() + _e); break } _e -= et - W.getDate() + 1, W.setDate(1), 11 > Pe ? W.setMonth(Pe + 1) : (W.setMonth(0), W.setFullYear(W.getFullYear() + 1)) } return Pe = new Date(W.getFullYear() + 1, 0, 4), _e = ne(new Date(W.getFullYear(), 0, 4)), Pe = ne(Pe), 0 >= Q(_e, W) ? 0 >= Q(Pe, W) ? W.getFullYear() + 1 : W.getFullYear() : W.getFullYear() - 1 } var ge = p()[F + 40 >> 2 >>> 0]; for (var De in F = { Lc: p()[F >> 2 >>> 0], Kc: p()[F + 4 >> 2 >>> 0], dc: p()[F + 8 >> 2 >>> 0], jc: p()[F + 12 >> 2 >>> 0], ec: p()[F + 16 >> 2 >>> 0], Xb: p()[F + 20 >> 2 >>> 0], Tb: p()[F + 24 >> 2 >>> 0], Wb: p()[F + 28 >> 2 >>> 0], Rc: p()[F + 32 >> 2 >>> 0], Jc: p()[F + 36 >> 2 >>> 0], Mc: ge ? ve(ge) : "" }, I = ve(I), ge = { "%c": "%a %b %d %H:%M:%S %Y", "%D": "%m/%d/%y", "%F": "%Y-%m-%d", "%h": "%b", "%r": "%I:%M:%S %p", "%R": "%H:%M", "%T": "%H:%M:%S", "%x": "%m/%d/%y", "%X": "%H:%M:%S", "%Ec": "%c", "%EC": "%C", "%Ex": "%m/%d/%y", "%EX": "%H:%M:%S", "%Ey": "%y", "%EY": "%Y", "%Od": "%d", "%Oe": "%e", "%OH": "%H", "%OI": "%I", "%Om": "%m", "%OM": "%M", "%OS": "%S", "%Ou": "%u", "%OU": "%U", "%OV": "%V", "%Ow": "%w", "%OW": "%W", "%Oy": "%y" }) I = I.replace(new RegExp(De, "g"), ge[De]); var Xe = "Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "), We = "January February March April May June July August September October November December".split(" "); for (De in ge = { "%a": function(W) { return Xe[W.Tb].substring(0, 3) }, "%A": function(W) { return Xe[W.Tb] }, "%b": function(W) { return We[W.ec].substring(0, 3) }, "%B": function(W) { return We[W.ec] }, "%C": function(W) { return G((W.Xb + 1900) / 100 | 0, 2) }, "%d": function(W) { return G(W.jc, 2) }, "%e": function(W) { return B(W.jc, 2, " ") }, "%g": function(W) { return fe(W).toString().substring(2) }, "%G": function(W) { return fe(W) }, "%H": function(W) { return G(W.dc, 2) }, "%I": function(W) { return (W = W.dc) == 0 ? W = 12 : 12 < W && (W -= 12), G(W, 2) }, "%j": function(W) { for (var _e = 0, Pe = 0; Pe <= W.ec - 1; _e += (Pt(W.Xb + 1900) ? 
jt : Bt)[Pe++]); return G(W.jc + _e, 3) }, "%m": function(W) { return G(W.ec + 1, 2) }, "%M": function(W) { return G(W.Kc, 2) }, "%n": function() { return ` ` }, "%p": function(W) { return 0 <= W.dc && 12 > W.dc ? "AM" : "PM" }, "%S": function(W) { return G(W.Lc, 2) }, "%t": function() { return " " }, "%u": function(W) { return W.Tb || 7 }, "%U": function(W) { return G(Math.floor((W.Wb + 7 - W.Tb) / 7), 2) }, "%V": function(W) { var _e = Math.floor((W.Wb + 7 - (W.Tb + 6) % 7) / 7); if (2 >= (W.Tb + 371 - W.Wb - 2) % 7 && _e++, _e) _e == 53 && ((Pe = (W.Tb + 371 - W.Wb) % 7) == 4 || Pe == 3 && Pt(W.Xb) || (_e = 1)); else { _e = 52; var Pe = (W.Tb + 7 - W.Wb - 1) % 7; (Pe == 4 || Pe == 5 && Pt(W.Xb % 400 - 1)) && _e++ } return G(_e, 2) }, "%w": function(W) { return W.Tb }, "%W": function(W) { return G(Math.floor((W.Wb + 7 - (W.Tb + 6) % 7) / 7), 2) }, "%y": function(W) { return (W.Xb + 1900).toString().substring(2) }, "%Y": function(W) { return W.Xb + 1900 }, "%z": function(W) { var _e = 0 <= (W = W.Jc); return W = Math.abs(W) / 60, (_e ? "+" : "-") + ("0000" + (W / 60 * 100 + W % 60)).slice(-4) }, "%Z": function(W) { return W.Mc }, "%%": function() { return "%" } }, I = I.replace(/%%/g, "\0\0"), ge) I.includes(De) && (I = I.replace(new RegExp(De, "g"), ge[De](F))); return De = function(W) { var _e = Array(Fe(W) + 1); return Ne(W, _e, 0, _e.length), _e }(I = I.replace(/\0\0/g, "%")), De.length > A ? 0 : (function(W, _e) { s().set(W, _e >>> 0) }(De, x), De.length - 1) } re.fc(); var cn = [null, $e, Mt, ot, gt, mt, bt, _t, yt, wt, Tt, vt, xt, St, Ot, M, D, U, H, te, q, me, Ee, qe, Ze, Rt], dn = { b: function(x) { return Lt(x + 24) + 24 }, n: function(x) { return (x = new oe(x)).uc() || (x.hc(!0), ae--), x.ic(!1), it.push(x), x.sc(), x.vc() }, ma: function(x) { throw $("Unexpected exception thrown, this is not properly supported - aborting"), be = !0, x }, x: function() { he(0); var x = it.pop(); if (x.Hc() && !x.kc()) { var A = x.Dc(); A && we(A)(x.Zb), ft(x.Zb) } ie = 0 }, e: function() { var x = ie; if (!x) return je = 0; var A = new oe(x); A.cc(x); var I = A.bc(); if (!I) return je = 0, x; for (var F = Array.prototype.slice.call(arguments), B = 0; B < F.length; B++) { var G = F[B]; if (G === 0 || G === I) break; if (Ct(G, I, A.Sb + 16)) return je = G, x } return je = I, x }, l: function() { var x = ie; if (!x) return je = 0; var A = new oe(x); A.cc(x); var I = A.bc(); if (!I) return je = 0, x; for (var F = Array.prototype.slice.call(arguments), B = 0; B < F.length; B++) { var G = F[B]; if (G === 0 || G === I) break; if (Ct(G, I, A.Sb + 16)) return je = G, x } return je = I, x }, h: function() { var x = ie; if (!x) return je = 0; var A = new oe(x); A.cc(x); var I = A.bc(); if (!I) return je = 0, x; for (var F = Array.prototype.slice.call(arguments), B = 0; B < F.length; B++) { var G = F[B]; if (G === 0 || G === I) break; if (Ct(G, I, A.Sb + 16)) return je = G, x } return je = I, x }, t: ft, M: function() { var x = it.pop(); x || pe("no exception to throw"); var A = x.Zb; throw x.kc() || (it.push(x), x.ic(!0), x.hc(!1), ae++), ie = A, A }, c: function(x, A, I) { throw new oe(x).fc(A, I), ie = x, ae++, x }, pa: function() { return ae }, Fa: function(x) { qt(x, !E, 1, !O), re.pc() }, T: function(x) { P ? 
postMessage({ cmd: "cleanupThread", thread: x }) : ut(x) }, xa: pt, j: function(x) { throw ie || (ie = x), x }, H: gt, Ma: mt, ua: bt, wa: _t, oa: yt, Ka: wt, Ca: Tt, Ja: vt, V: xt, va: St, sa: Ot, La: M, ta: D, Ta: function() {}, X: function() { pe("To use dlopen, you need enable dynamic linking, see https://github.com/emscripten-core/emscripten/wiki/Linking") }, Ua: function() { pe("To use dlopen, you need enable dynamic linking, see https://github.com/emscripten-core/emscripten/wiki/Linking") }, W: function() { return Date.now() }, ya: function() { return 2097152 }, Oa: function() { return !0 }, za: function(x, A, I, F) { if (x == A) setTimeout(() => N(F)); else if (P) postMessage({ targetThread: x, cmd: "processProxyingQueue", queue: F }); else { if (!(x = re.Vb[x])) return; x.postMessage({ cmd: "processProxyingQueue", queue: F }) } return 1 }, Ea: function() { return -1 }, Pa: function(x, A) { x = new Date(1e3 * j(x)), p()[A >> 2 >>> 0] = x.getUTCSeconds(), p()[A + 4 >> 2 >>> 0] = x.getUTCMinutes(), p()[A + 8 >> 2 >>> 0] = x.getUTCHours(), p()[A + 12 >> 2 >>> 0] = x.getUTCDate(), p()[A + 16 >> 2 >>> 0] = x.getUTCMonth(), p()[A + 20 >> 2 >>> 0] = x.getUTCFullYear() - 1900, p()[A + 24 >> 2 >>> 0] = x.getUTCDay(), x = (x.getTime() - Date.UTC(x.getUTCFullYear(), 0, 1, 0, 0, 0, 0)) / 864e5 | 0, p()[A + 28 >> 2 >>> 0] = x }, Qa: function(x, A) { x = new Date(1e3 * j(x)), p()[A >> 2 >>> 0] = x.getSeconds(), p()[A + 4 >> 2 >>> 0] = x.getMinutes(), p()[A + 8 >> 2 >>> 0] = x.getHours(), p()[A + 12 >> 2 >>> 0] = x.getDate(), p()[A + 16 >> 2 >>> 0] = x.getMonth(), p()[A + 20 >> 2 >>> 0] = x.getFullYear() - 1900, p()[A + 24 >> 2 >>> 0] = x.getDay(); var I = new Date(x.getFullYear(), 0, 1), F = (x.getTime() - I.getTime()) / 864e5 | 0; p()[A + 28 >> 2 >>> 0] = F, p()[A + 36 >> 2 >>> 0] = -60 * x.getTimezoneOffset(), F = new Date(x.getFullYear(), 6, 1).getTimezoneOffset(), x = 0 | (F != (I = I.getTimezoneOffset()) && x.getTimezoneOffset() == Math.min(I, F)), p()[A + 32 >> 2 >>> 0] = x }, Ra: function(x) { var A = new Date(p()[x + 20 >> 2 >>> 0] + 1900, p()[x + 16 >> 2 >>> 0], p()[x + 12 >> 2 >>> 0], p()[x + 8 >> 2 >>> 0], p()[x + 4 >> 2 >>> 0], p()[x >> 2 >>> 0], 0), I = p()[x + 32 >> 2 >>> 0], F = A.getTimezoneOffset(), B = new Date(A.getFullYear(), 0, 1), G = new Date(A.getFullYear(), 6, 1).getTimezoneOffset(), Q = B.getTimezoneOffset(), ne = Math.min(Q, G); return 0 > I ? p()[x + 32 >> 2 >>> 0] = +(G != Q && ne == F) : 0 < I != (ne == F) && (G = Math.max(Q, G), A.setTime(A.getTime() + 6e4 * ((0 < I ? ne : G) - F))), p()[x + 24 >> 2 >>> 0] = A.getDay(), I = (A.getTime() - B.getTime()) / 864e5 | 0, p()[x + 28 >> 2 >>> 0] = I, p()[x >> 2 >>> 0] = A.getSeconds(), p()[x + 4 >> 2 >>> 0] = A.getMinutes(), p()[x + 8 >> 2 >>> 0] = A.getHours(), p()[x + 12 >> 2 >>> 0] = A.getDate(), p()[x + 16 >> 2 >>> 0] = A.getMonth(), A.getTime() / 1e3 | 0 }, Aa: U, Ba: H, Sa: function x(A, I, F) { x.Ac || (x.Ac = !0, te(A, I, F)) }, y: function() { pe("") }, U: function() { if (!v && !E) { var x = "Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread"; rt || (rt = {}), rt[x] || (rt[x] = 1, v && (x = "warning: " + x), $(x)) } }, ra: function() { return 4294901760 }, B: ht, Ia: function(x, A, I) { h().copyWithin(x >>> 0, A >>> 0, A + I >>> 0) }, F: function() { return v ? 
a(3993).cpus().length : navigator.hardwareConcurrency }, Da: function(x, A, I) { xe.length = A, I >>= 3; for (var F = 0; F < A; F++) xe[F] = o()[I + F >>> 0]; return (0 > x ? Et[-x - 1] : cn[x]).apply(null, xe) }, qa: function(x) { var A = h().length; if ((x >>>= 0) <= A || 4294901760 < x) return !1; for (var I = 1; 4 >= I; I *= 2) { var F = A * (1 + .2 / I); F = Math.min(F, x + 100663296); var B = Math; F = Math.max(x, F), B = B.min.call(B, 4294901760, F + (65536 - F % 65536) % 65536); e: { try { z.grow(B - J.byteLength + 65535 >>> 16), Me(z.buffer); var G = 1; break e } catch {} G = void 0 } if (G) return !0 } return !1 }, Na: function() { throw "unwind" }, Ga: q, Ha: me, J: nt, I: Ee, S: qe, ga: Ze, R: Rt, d: function() { return je }, na: function x(A, I) { x.lc || (x.lc = function() { if (typeof crypto == "object" && typeof crypto.getRandomValues == "function") { var B = new Uint8Array(1); return () => (crypto.getRandomValues(B), B[0]) } if (v) try { var G = a(Object(function() { var Q = new Error("Cannot find module 'crypto'"); throw Q.code = "MODULE_NOT_FOUND", Q }())); return () => G.randomBytes(1)[0] } catch {} return () => pe("randomDevice") }()); for (var F = 0; F < I; F++) s()[A + F >> 0 >>> 0] = x.lc(); return 0 }, ia: function(x, A, I) { var F = de(); try { return we(x)(A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, ja: function(x, A, I) { var F = de(); try { return we(x)(A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, K: function(x) { var A = de(); try { return we(x)() } catch (I) { if (ce(A), I !== I + 0) throw I; he(1, 0) } }, f: function(x, A) { var I = de(); try { return we(x)(A) } catch (F) { if (ce(I), F !== F + 0) throw F; he(1, 0) } }, P: function(x, A, I) { var F = de(); try { return we(x)(A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, Q: function(x, A, I) { var F = de(); try { return we(x)(A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, k: function(x, A, I) { var F = de(); try { return we(x)(A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, p: function(x, A, I, F) { var B = de(); try { return we(x)(A, I, F) } catch (G) { if (ce(B), G !== G + 0) throw G; he(1, 0) } }, q: function(x, A, I, F, B) { var G = de(); try { return we(x)(A, I, F, B) } catch (Q) { if (ce(G), Q !== Q + 0) throw Q; he(1, 0) } }, N: function(x, A, I, F, B, G) { var Q = de(); try { return we(x)(A, I, F, B, G) } catch (ne) { if (ce(Q), ne !== ne + 0) throw ne; he(1, 0) } }, s: function(x, A, I, F, B, G) { var Q = de(); try { return we(x)(A, I, F, B, G) } catch (ne) { if (ce(Q), ne !== ne + 0) throw ne; he(1, 0) } }, w: function(x, A, I, F, B, G, Q) { var ne = de(); try { return we(x)(A, I, F, B, G, Q) } catch (fe) { if (ce(ne), fe !== fe + 0) throw fe; he(1, 0) } }, L: function(x, A, I, F, B, G, Q, ne) { var fe = de(); try { return we(x)(A, I, F, B, G, Q, ne) } catch (ge) { if (ce(fe), ge !== ge + 0) throw ge; he(1, 0) } }, E: function(x, A, I, F, B, G, Q, ne, fe, ge, De, Xe) { var We = de(); try { return we(x)(A, I, F, B, G, Q, ne, fe, ge, De, Xe) } catch (W) { if (ce(We), W !== W + 0) throw W; he(1, 0) } }, aa: function(x, A, I, F, B, G, Q, ne) { var fe = de(); try { return sn(x, A, I, F, B, G, Q, ne) } catch (ge) { if (ce(fe), ge !== ge + 0) throw ge; he(1, 0) } }, _: function(x, A, I, F, B, G, Q) { var ne = de(); try { return Jt(x, A, I, F, B, G, Q) } catch (fe) { if (ce(ne), fe !== fe + 0) throw fe; he(1, 0) } }, Z: function(x, A, I, F, B) { var G = de(); try { return an(x, A, I, F, B) } catch (Q) { 
if (ce(G), Q !== Q + 0) throw Q; he(1, 0) } }, ca: function(x, A, I, F) { var B = de(); try { return rn(x, A, I, F) } catch (G) { if (ce(B), G !== G + 0) throw G; he(1, 0) } }, $: function(x) { var A = de(); try { return Qt(x) } catch (I) { if (ce(A), I !== I + 0) throw I; he(1, 0) } }, ba: function(x, A) { var I = de(); try { return on(x, A) } catch (F) { if (ce(I), F !== F + 0) throw F; he(1, 0) } }, Y: function(x, A, I) { var F = de(); try { return Zt(x, A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, g: function(x) { var A = de(); try { we(x)() } catch (I) { if (ce(A), I !== I + 0) throw I; he(1, 0) } }, r: function(x, A) { var I = de(); try { we(x)(A) } catch (F) { if (ce(I), F !== F + 0) throw F; he(1, 0) } }, i: function(x, A, I) { var F = de(); try { we(x)(A, I) } catch (B) { if (ce(F), B !== B + 0) throw B; he(1, 0) } }, ha: function(x, A, I, F) { var B = de(); try { we(x)(A, I, F) } catch (G) { if (ce(B), G !== G + 0) throw G; he(1, 0) } }, m: function(x, A, I, F) { var B = de(); try { we(x)(A, I, F) } catch (G) { if (ce(B), G !== G + 0) throw G; he(1, 0) } }, v: function(x, A, I, F, B) { var G = de(); try { we(x)(A, I, F, B) } catch (Q) { if (ce(G), Q !== Q + 0) throw Q; he(1, 0) } }, u: function(x, A, I, F, B, G) { var Q = de(); try { we(x)(A, I, F, B, G) } catch (ne) { if (ce(Q), ne !== ne + 0) throw ne; he(1, 0) } }, O: function(x, A, I, F, B, G, Q) { var ne = de(); try { we(x)(A, I, F, B, G, Q) } catch (fe) { if (ce(ne), fe !== fe + 0) throw fe; he(1, 0) } }, A: function(x, A, I, F, B, G, Q, ne) { var fe = de(); try { we(x)(A, I, F, B, G, Q, ne) } catch (ge) { if (ce(fe), ge !== ge + 0) throw ge; he(1, 0) } }, ka: function(x, A, I, F, B, G, Q, ne, fe) { var ge = de(); try { we(x)(A, I, F, B, G, Q, ne, fe) } catch (De) { if (ce(ge), De !== De + 0) throw De; he(1, 0) } }, C: function(x, A, I, F, B, G, Q, ne, fe, ge, De) { var Xe = de(); try { we(x)(A, I, F, B, G, Q, ne, fe, ge, De) } catch (We) { if (ce(Xe), We !== We + 0) throw We; he(1, 0) } }, D: function(x, A, I, F, B, G, Q, ne, fe, ge, De, Xe, We, W, _e, Pe) { var et = de(); try { we(x)(A, I, F, B, G, Q, ne, fe, ge, De, Xe, We, W, _e, Pe) } catch (ct) { if (ce(et), ct !== ct + 0) throw ct; he(1, 0) } }, fa: function(x, A, I, F, B, G, Q, ne) { var fe = de(); try { en(x, A, I, F, B, G, Q, ne) } catch (ge) { if (ce(fe), ge !== ge + 0) throw ge; he(1, 0) } }, da: function(x, A, I, F, B, G, Q, ne, fe, ge, De, Xe) { var We = de(); try { nn(x, A, I, F, B, G, Q, ne, fe, ge, De, Xe) } catch (W) { if (ce(We), W !== W + 0) throw W; he(1, 0) } }, ea: function(x, A, I, F, B, G) { var Q = de(); try { tn(x, A, I, F, B, G) } catch (ne) { if (ce(Q), ne !== ne + 0) throw ne; he(1, 0) } }, o: function(x) { return x }, a: z || t.wasmMemory, G: function(x) { je = x }, la: Vt, z: function(x, A, I, F) { return Vt(x, A, I, F) } }; (function() { function x(B, G) { t.asm = B.exports, re.qc.push(t.asm.sb), Be = t.asm.ub, ze.unshift(t.asm.Va), Z = G, P || (Re--, t.monitorRunDependencies && t.monitorRunDependencies(Re), Re == 0 && Ye && (B = Ye, Ye = null, B())) } function A(B) { x(B.instance, B.module) } function I(B) { return function() { if (!Y && (O || E)) { if (typeof fetch == "function" && !Ae.startsWith("file://")) return fetch(Ae, { credentials: "same-origin" }).then(function(G) { if (!G.ok) throw "failed to load wasm binary file at '" + Ae + "'"; return G.arrayBuffer() }).catch(function() { return at() }); if (d) return new Promise(function(G, Q) { d(Ae, function(ne) { G(new Uint8Array(ne)) }, Q) }) } return 
Promise.resolve().then(function() { return at() }) }().then(function(G) { return WebAssembly.instantiate(G, F) }).then(function(G) { return G }).then(B, function(G) { $("failed to asynchronously prepare wasm: " + G), pe(G) }) } var F = { a: dn }; if (P || (Re++, t.monitorRunDependencies && t.monitorRunDependencies(Re)), t.instantiateWasm) try { return t.instantiateWasm(F, x) } catch (B) { return $("Module.instantiateWasm callback failed with error: " + B), !1 }(Y || typeof WebAssembly.instantiateStreaming != "function" || dt() || Ae.startsWith("file://") || v || typeof fetch != "function" ? I(A) : fetch(Ae, { credentials: "same-origin" }).then(function(B) { return WebAssembly.instantiateStreaming(B, F).then(A, function(G) { return $("wasm streaming compile failed: " + G), $("falling back to ArrayBuffer instantiation"), I(A) }) })).catch(r) })(), t.___wasm_call_ctors = function() { return (t.___wasm_call_ctors = t.asm.Va).apply(null, arguments) }, t._OrtInit = function() { return (t._OrtInit = t.asm.Wa).apply(null, arguments) }, t._OrtCreateSessionOptions = function() { return (t._OrtCreateSessionOptions = t.asm.Xa).apply(null, arguments) }, t._OrtAppendExecutionProvider = function() { return (t._OrtAppendExecutionProvider = t.asm.Ya).apply(null, arguments) }, t._OrtAddSessionConfigEntry = function() { return (t._OrtAddSessionConfigEntry = t.asm.Za).apply(null, arguments) }, t._OrtReleaseSessionOptions = function() { return (t._OrtReleaseSessionOptions = t.asm._a).apply(null, arguments) }, t._OrtCreateSession = function() { return (t._OrtCreateSession = t.asm.$a).apply(null, arguments) }, t._OrtReleaseSession = function() { return (t._OrtReleaseSession = t.asm.ab).apply(null, arguments) }, t._OrtGetInputCount = function() { return (t._OrtGetInputCount = t.asm.bb).apply(null, arguments) }, t._OrtGetOutputCount = function() { return (t._OrtGetOutputCount = t.asm.cb).apply(null, arguments) }, t._OrtGetInputName = function() { return (t._OrtGetInputName = t.asm.db).apply(null, arguments) }, t._OrtGetOutputName = function() { return (t._OrtGetOutputName = t.asm.eb).apply(null, arguments) }, t._OrtFree = function() { return (t._OrtFree = t.asm.fb).apply(null, arguments) }, t._OrtCreateTensor = function() { return (t._OrtCreateTensor = t.asm.gb).apply(null, arguments) }, t._OrtGetTensorData = function() { return (t._OrtGetTensorData = t.asm.hb).apply(null, arguments) }, t._OrtReleaseTensor = function() { return (t._OrtReleaseTensor = t.asm.ib).apply(null, arguments) }, t._OrtCreateRunOptions = function() { return (t._OrtCreateRunOptions = t.asm.jb).apply(null, arguments) }, t._OrtAddRunConfigEntry = function() { return (t._OrtAddRunConfigEntry = t.asm.kb).apply(null, arguments) }, t._OrtReleaseRunOptions = function() { return (t._OrtReleaseRunOptions = t.asm.lb).apply(null, arguments) }, t._OrtRun = function() { return (t._OrtRun = t.asm.mb).apply(null, arguments) }, t._OrtEndProfiling = function() { return (t._OrtEndProfiling = t.asm.nb).apply(null, arguments) }; var It = t._pthread_self = function() { return (It = t._pthread_self = t.asm.ob).apply(null, arguments) }, Lt = t._malloc = function() { return (Lt = t._malloc = t.asm.pb).apply(null, arguments) }, Ut = t._free = function() { return (Ut = t._free = t.asm.qb).apply(null, arguments) }, zt = t._fflush = function() { return (zt = t._fflush = t.asm.rb).apply(null, arguments) }; t.__emscripten_tls_init = function() { return (t.__emscripten_tls_init = t.asm.sb).apply(null, arguments) }; var Gt = t.___funcs_on_exit = function() { return (Gt = 
t.___funcs_on_exit = t.asm.tb).apply(null, arguments) }, qt = t.__emscripten_thread_init = function() { return (qt = t.__emscripten_thread_init = t.asm.vb).apply(null, arguments) }; t.__emscripten_thread_crashed = function() { return (t.__emscripten_thread_crashed = t.asm.wb).apply(null, arguments) }; var Dt, Wt = t._emscripten_run_in_main_runtime_thread_js = function() { return (Wt = t._emscripten_run_in_main_runtime_thread_js = t.asm.xb).apply(null, arguments) }, Ht = t.__emscripten_proxy_execute_task_queue = function() { return (Ht = t.__emscripten_proxy_execute_task_queue = t.asm.yb).apply(null, arguments) }, Nt = t.__emscripten_thread_free_data = function() { return (Nt = t.__emscripten_thread_free_data = t.asm.zb).apply(null, arguments) }, Yt = t.__emscripten_thread_exit = function() { return (Yt = t.__emscripten_thread_exit = t.asm.Ab).apply(null, arguments) }, he = t._setThrew = function() { return (he = t._setThrew = t.asm.Bb).apply(null, arguments) }, Xt = t._emscripten_stack_set_limits = function() { return (Xt = t._emscripten_stack_set_limits = t.asm.Cb).apply(null, arguments) }, de = t.stackSave = function() { return (de = t.stackSave = t.asm.Db).apply(null, arguments) }, ce = t.stackRestore = function() { return (ce = t.stackRestore = t.asm.Eb).apply(null, arguments) }, $t = t.stackAlloc = function() { return ($t = t.stackAlloc = t.asm.Fb).apply(null, arguments) }, Ct = t.___cxa_can_catch = function() { return (Ct = t.___cxa_can_catch = t.asm.Gb).apply(null, arguments) }, Kt = t.___cxa_is_pointer_type = function() { return (Kt = t.___cxa_is_pointer_type = t.asm.Hb).apply(null, arguments) }, Qt = t.dynCall_j = function() { return (Qt = t.dynCall_j = t.asm.Ib).apply(null, arguments) }, Jt = t.dynCall_iiiiij = function() { return (Jt = t.dynCall_iiiiij = t.asm.Jb).apply(null, arguments) }, Zt = t.dynCall_jii = function() { return (Zt = t.dynCall_jii = t.asm.Kb).apply(null, arguments) }, en = t.dynCall_viiiiij = function() { return (en = t.dynCall_viiiiij = t.asm.Lb).apply(null, arguments) }, tn = t.dynCall_vjji = function() { return (tn = t.dynCall_vjji = t.asm.Mb).apply(null, arguments) }, nn = t.dynCall_viiijjjii = function() { return (nn = t.dynCall_viiijjjii = t.asm.Nb).apply(null, arguments) }, rn = t.dynCall_iij = function() { return (rn = t.dynCall_iij = t.asm.Ob).apply(null, arguments) }, on = t.dynCall_ji = function() { return (on = t.dynCall_ji = t.asm.Pb).apply(null, arguments) }, sn = t.dynCall_iiiiiij = function() { return (sn = t.dynCall_iiiiiij = t.asm.Qb).apply(null, arguments) }, an = t.dynCall_iiij = function() { return (an = t.dynCall_iiij = t.asm.Rb).apply(null, arguments) }; function un() { function x() { if (!Dt && (Dt = !0, t.calledRun = !0, !be) && (P || tt(ze), e(t), t.onRuntimeInitialized && t.onRuntimeInitialized(), !P)) { if (t.postRun) for (typeof t.postRun == "function" && (t.postRun = [t.postRun]); t.postRun.length;) { var A = t.postRun.shift(); Ke.unshift(A) } tt(Ke) } } if (!(0 < Re)) if (P) e(t), P || tt(ze), postMessage({ cmd: "loaded" }); else { if (t.preRun) for (typeof t.preRun == "function" && (t.preRun = [t.preRun]); t.preRun.length;) Ve(); tt(Ue), 0 < Re || (t.setStatus ? 
(t.setStatus("Running..."), setTimeout(function() { setTimeout(function() { t.setStatus("") }, 1), x() }, 1)) : x()) } } if (t.UTF8ToString = ve, t.stringToUTF8 = function(x, A, I) { return Ne(x, h(), A, I) }, t.lengthBytesUTF8 = Fe, t.keepRuntimeAlive = Ge, t.wasmMemory = z, t.stackSave = de, t.stackRestore = ce, t.stackAlloc = $t, t.ExitStatus = Qe, t.PThread = re, Ye = function x() { Dt || un(), Dt || (Ye = x) }, t.preInit) for (typeof t.preInit == "function" && (t.preInit = [t.preInit]); 0 < t.preInit.length;) t.preInit.pop()(); return un(), f.ready }); b.exports = c }, 932: (b, n, a) => { var u, c = (u = (u = typeof document < "u" && document.currentScript ? document.currentScript.src : void 0) || "/index.js", function(f) { var s, h, p; f = f || {}, s || (s = f !== void 0 ? f : {}), s.ready = new Promise(function(M, D) { h = M, p = D }); var l, o, t, e, r, i, d = Object.assign({}, s), g = "./this.program", m = (M, D) => { throw D }, _ = typeof window == "object", y = typeof importScripts == "function", T = typeof process == "object" && typeof process.versions == "object" && typeof process.versions.node == "string", w = ""; T ? (w = y ? a(908).dirname(w) + "/" : "//", i = () => { r || (e = a(1384), r = a(908)) }, l = function(M, D) { return i(), M = r.normalize(M), e.readFileSync(M, D ? void 0 : "utf8") }, t = M => ((M = l(M, !0)).buffer || (M = new Uint8Array(M)), M), o = (M, D, N) => { i(), M = r.normalize(M), e.readFile(M, function(j, U) { j ? N(j) : D(U.buffer) }) }, 1 < process.argv.length && (g = process.argv[1].replace(/\\/g, "/")), process.argv.slice(2), process.on("uncaughtException", function(M) { if (!(M instanceof ze)) throw M }), process.on("unhandledRejection", function(M) { throw M }), m = (M, D) => { if (v || 0 < Ie) throw process.exitCode = M, D; D instanceof ze || E("exiting due to exception: " + D), process.exit(M) }, s.inspect = function() { return "[Emscripten Module object]" }) : (_ || y) && (y ? w = self.location.href : typeof document < "u" && document.currentScript && (w = document.currentScript.src), u && (w = u), w = w.indexOf("blob:") !== 0 ? w.substr(0, w.replace(/[?#].*/, "").lastIndexOf("/") + 1) : "", l = M => { var D = new XMLHttpRequest; return D.open("GET", M, !1), D.send(null), D.responseText }, y && (t = M => { var D = new XMLHttpRequest; return D.open("GET", M, !1), D.responseType = "arraybuffer", D.send(null), new Uint8Array(D.response) }), o = (M, D, N) => { var j = new XMLHttpRequest; j.open("GET", M, !0), j.responseType = "arraybuffer", j.onload = () => { j.status == 200 || j.status == 0 && j.response ? D(j.response) : N() }, j.onerror = N, j.send(null) }); var S, O = s.print || console.log.bind(console), E = s.printErr || console.warn.bind(console); Object.assign(s, d), d = null, s.thisProgram && (g = s.thisProgram), s.quit && (m = s.quit), s.wasmBinary && (S = s.wasmBinary); var v = s.noExitRuntime || !1; typeof WebAssembly != "object" && Me("no native wasm support detected"); var P, L, V, R, k, Y, C = !1, $ = typeof TextDecoder < "u" ? new TextDecoder("utf8") : void 0; function X(M, D, N) { var j = (D >>>= 0) + N; for (N = D; M[N] && !(N >= j);) ++N; if (16 < N - D && M.buffer && $) return $.decode(M.subarray(D, N)); for (j = ""; D < N;) { var U = M[D++]; if (128 & U) { var H = 63 & M[D++]; if ((224 & U) == 192) j += String.fromCharCode((31 & U) << 6 | H); else { var K = 63 & M[D++]; 65536 > (U = (240 & U) == 224 ? (15 & U) << 12 | H << 6 | K : (7 & U) << 18 | H << 12 | K << 6 | 63 & M[D++]) ? 
j += String.fromCharCode(U) : (U -= 65536, j += String.fromCharCode(55296 | U >> 10, 56320 | 1023 & U)) } } else j += String.fromCharCode(U) } return j } function z(M, D) { return (M >>>= 0) ? X(R, M, D) : "" } function Z(M, D, N, j) { if (!(0 < j)) return 0; var U = N >>>= 0; j = N + j - 1; for (var H = 0; H < M.length; ++H) { var K = M.charCodeAt(H); if (55296 <= K && 57343 >= K && (K = 65536 + ((1023 & K) << 10) | 1023 & M.charCodeAt(++H)), 127 >= K) { if (N >= j) break; D[N++ >>> 0] = K } else { if (2047 >= K) { if (N + 1 >= j) break; D[N++ >>> 0] = 192 | K >> 6 } else { if (65535 >= K) { if (N + 2 >= j) break; D[N++ >>> 0] = 224 | K >> 12 } else { if (N + 3 >= j) break; D[N++ >>> 0] = 240 | K >> 18, D[N++ >>> 0] = 128 | K >> 12 & 63 } D[N++ >>> 0] = 128 | K >> 6 & 63 } D[N++ >>> 0] = 128 | 63 & K } } return D[N >>> 0] = 0, N - U } function J(M) { for (var D = 0, N = 0; N < M.length; ++N) { var j = M.charCodeAt(N); 127 >= j ? D++ : 2047 >= j ? D += 2 : 55296 <= j && 57343 >= j ? (D += 4, ++N) : D += 3 } return D } function ue() { var M = P.buffer; L = M, s.HEAP8 = V = new Int8Array(M), s.HEAP16 = new Int16Array(M), s.HEAP32 = k = new Int32Array(M), s.HEAPU8 = R = new Uint8Array(M), s.HEAPU16 = new Uint16Array(M), s.HEAPU32 = Y = new Uint32Array(M), s.HEAPF32 = new Float32Array(M), s.HEAPF64 = new Float64Array(M) } var Se, Te = [], se = [], ye = [], be = [], Ie = 0; function Le() { var M = s.preRun.shift(); Te.unshift(M) } var ve, Ne = 0, Fe = null; function Me(M) { throw s.onAbort && s.onAbort(M), E(M = "Aborted(" + M + ")"), C = !0, M = new WebAssembly.RuntimeError(M + ". Build with -sASSERTIONS for more info."), p(M), M } function Oe() { return ve.startsWith("data:application/octet-stream;base64,") } if (ve = "ort-wasm.wasm", !Oe()) { var Be = ve; ve = s.locateFile ? s.locateFile(Be, w) : w + Be } function Ue() { var M = ve; try { if (M == ve && S) return new Uint8Array(S); if (t) return t(M); throw "both async and sync fetching of the wasm failed" } catch (D) { Me(D) } } function ze(M) { this.name = "ExitStatus", this.message = "Program terminated with exit(" + M + ")", this.status = M } function He(M) { for (; 0 < M.length;) M.shift()(s) } var Ke = [], Ge = 0, Ve = 0; function Ae(M) { this.Db = M, this.zb = M - 24, this.Ub = function(D) { Y[this.zb + 4 >> 2 >>> 0] = D }, this.Eb = function() { return Y[this.zb + 4 >> 2 >>> 0] }, this.Sb = function(D) { Y[this.zb + 8 >> 2 >>> 0] = D }, this.Wb = function() { return Y[this.zb + 8 >> 2 >>> 0] }, this.Tb = function() { k[this.zb >> 2 >>> 0] = 0 }, this.Ib = function(D) { V[this.zb + 12 >> 0 >>> 0] = D ? 1 : 0 }, this.Pb = function() { return V[this.zb + 12 >> 0 >>> 0] != 0 }, this.Jb = function(D) { V[this.zb + 13 >> 0 >>> 0] = D ? 1 : 0 }, this.Lb = function() { return V[this.zb + 13 >> 0 >>> 0] != 0 }, this.Rb = function(D, N) { this.Fb(0), this.Ub(D), this.Sb(N), this.Tb(), this.Ib(!1), this.Jb(!1) }, this.Nb = function() { k[this.zb >> 2 >>> 0] += 1 }, this.Xb = function() { var D = k[this.zb >> 2 >>> 0]; return k[this.zb >> 2 >>> 0] = D - 1, D === 1 }, this.Fb = function(D) { Y[this.zb + 16 >> 2 >>> 0] = D }, this.Ob = function() { return Y[this.zb + 16 >> 2 >>> 0] }, this.Qb = function() { if (pt(this.Eb())) return Y[this.Db >> 2 >>> 0]; var D = this.Ob(); return D !== 0 ? 
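/* Ae looks like Emscripten's ExceptionInfo wrapper for a thrown C++ exception: its metadata (type, destructor, refcount, caught/rethrown flags) sits 24 bytes before the exception pointer, and this method resolves the adjusted pointer used when catching by pointer type. */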
D : this.Db } } function Re(M) { return rt(new Ae(M).zb) } var Ye = []; function pe(M) { var D = Ye[M]; return D || (M >= Ye.length && (Ye.length = M + 1), Ye[M] = D = Se.get(M)), D } function dt(M) { var D = J(M) + 1, N = we(D); return N && Z(M, V, N, D), N } var at = {}; function Et() { if (!Qe) { var M, D = { USER: "web_user", LOGNAME: "web_user", PATH: "/", PWD: "/", HOME: "/home/web_user", LANG: (typeof navigator == "object" && navigator.languages && navigator.languages[0] || "C").replace("-", "_") + ".UTF-8", _: g || "./this.program" }; for (M in at) at[M] === void 0 ? delete D[M] : D[M] = at[M]; var N = []; for (M in D) N.push(M + "=" + D[M]); Qe = N } return Qe } var Qe, ut = [null, [], [] ]; function lt(M, D) { var N = ut[M]; D === 0 || D === 10 ? ((M === 1 ? O : E)(X(N, 0)), N.length = 0) : N.push(D) } var $e = 0; function nt(M) { return M % 4 == 0 && (M % 100 != 0 || M % 400 == 0) } var re = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], tt = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; function At(M, D, N, j) { function U(q, me, Ee) { for (q = typeof q == "number" ? q.toString() : q || ""; q.length < me;) q = Ee[0] + q; return q } function H(q, me) { return U(q, me, "0") } function K(q, me) { function Ee(Ze) { return 0 > Ze ? -1 : 0 < Ze ? 1 : 0 } var qe; return (qe = Ee(q.getFullYear() - me.getFullYear())) === 0 && (qe = Ee(q.getMonth() - me.getMonth())) === 0 && (qe = Ee(q.getDate() - me.getDate())), qe } function te(q) { switch (q.getDay()) { case 0: return new Date(q.getFullYear() - 1, 11, 29); case 1: return q; case 2: return new Date(q.getFullYear(), 0, 3); case 3: return new Date(q.getFullYear(), 0, 2); case 4: return new Date(q.getFullYear(), 0, 1); case 5: return new Date(q.getFullYear() - 1, 11, 31); case 6: return new Date(q.getFullYear() - 1, 11, 30) } } function ee(q) { var me = q.Bb; for (q = new Date(new Date(q.Cb + 1900, 0, 1).getTime()); 0 < me;) { var Ee = q.getMonth(), qe = (nt(q.getFullYear()) ? re : tt)[Ee]; if (!(me > qe - q.getDate())) { q.setDate(q.getDate() + me); break } me -= qe - q.getDate() + 1, q.setDate(1), 11 > Ee ? q.setMonth(Ee + 1) : (q.setMonth(0), q.setFullYear(q.getFullYear() + 1)) } return Ee = new Date(q.getFullYear() + 1, 0, 4), me = te(new Date(q.getFullYear(), 0, 4)), Ee = te(Ee), 0 >= K(me, q) ? 0 >= K(Ee, q) ? q.getFullYear() + 1 : q.getFullYear() : q.getFullYear() - 1 } var le = k[j + 40 >> 2 >>> 0]; for (var xe in j = { $b: k[j >> 2 >>> 0], Zb: k[j + 4 >> 2 >>> 0], Gb: k[j + 8 >> 2 >>> 0], Kb: k[j + 12 >> 2 >>> 0], Hb: k[j + 16 >> 2 >>> 0], Cb: k[j + 20 >> 2 >>> 0], Ab: k[j + 24 >> 2 >>> 0], Bb: k[j + 28 >> 2 >>> 0], bc: k[j + 32 >> 2 >>> 0], Yb: k[j + 36 >> 2 >>> 0], ac: le ? 
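/* At appears to be the strftime shim: the tm struct is unpacked from the heap into mangled fields ($b..ac), with ac holding the time-zone name decoded from the pointer le when it is non-null, before the %-specifier tables below expand the format string. */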
z(le) : "" }, N = z(N), le = { "%c": "%a %b %d %H:%M:%S %Y", "%D": "%m/%d/%y", "%F": "%Y-%m-%d", "%h": "%b", "%r": "%I:%M:%S %p", "%R": "%H:%M", "%T": "%H:%M:%S", "%x": "%m/%d/%y", "%X": "%H:%M:%S", "%Ec": "%c", "%EC": "%C", "%Ex": "%m/%d/%y", "%EX": "%H:%M:%S", "%Ey": "%y", "%EY": "%Y", "%Od": "%d", "%Oe": "%e", "%OH": "%H", "%OI": "%I", "%Om": "%m", "%OM": "%M", "%OS": "%S", "%Ou": "%u", "%OU": "%U", "%OV": "%V", "%Ow": "%w", "%OW": "%W", "%Oy": "%y" }) N = N.replace(new RegExp(xe, "g"), le[xe]); var ke = "Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "), Ce = "January February March April May June July August September October November December".split(" "); for (xe in le = { "%a": function(q) { return ke[q.Ab].substring(0, 3) }, "%A": function(q) { return ke[q.Ab] }, "%b": function(q) { return Ce[q.Hb].substring(0, 3) }, "%B": function(q) { return Ce[q.Hb] }, "%C": function(q) { return H((q.Cb + 1900) / 100 | 0, 2) }, "%d": function(q) { return H(q.Kb, 2) }, "%e": function(q) { return U(q.Kb, 2, " ") }, "%g": function(q) { return ee(q).toString().substring(2) }, "%G": function(q) { return ee(q) }, "%H": function(q) { return H(q.Gb, 2) }, "%I": function(q) { return (q = q.Gb) == 0 ? q = 12 : 12 < q && (q -= 12), H(q, 2) }, "%j": function(q) { for (var me = 0, Ee = 0; Ee <= q.Hb - 1; me += (nt(q.Cb + 1900) ? re : tt)[Ee++]); return H(q.Kb + me, 3) }, "%m": function(q) { return H(q.Hb + 1, 2) }, "%M": function(q) { return H(q.Zb, 2) }, "%n": function() { return ` ` }, "%p": function(q) { return 0 <= q.Gb && 12 > q.Gb ? "AM" : "PM" }, "%S": function(q) { return H(q.$b, 2) }, "%t": function() { return " " }, "%u": function(q) { return q.Ab || 7 }, "%U": function(q) { return H(Math.floor((q.Bb + 7 - q.Ab) / 7), 2) }, "%V": function(q) { var me = Math.floor((q.Bb + 7 - (q.Ab + 6) % 7) / 7); if (2 >= (q.Ab + 371 - q.Bb - 2) % 7 && me++, me) me == 53 && ((Ee = (q.Ab + 371 - q.Bb) % 7) == 4 || Ee == 3 && nt(q.Cb) || (me = 1)); else { me = 52; var Ee = (q.Ab + 7 - q.Bb - 1) % 7; (Ee == 4 || Ee == 5 && nt(q.Cb % 400 - 1)) && me++ } return H(me, 2) }, "%w": function(q) { return q.Ab }, "%W": function(q) { return H(Math.floor((q.Bb + 7 - (q.Ab + 6) % 7) / 7), 2) }, "%y": function(q) { return (q.Cb + 1900).toString().substring(2) }, "%Y": function(q) { return q.Cb + 1900 }, "%z": function(q) { var me = 0 <= (q = q.Yb); return q = Math.abs(q) / 60, (me ? "+" : "-") + ("0000" + (q / 60 * 100 + q % 60)).slice(-4) }, "%Z": function(q) { return q.ac }, "%%": function() { return "%" } }, N = N.replace(/%%/g, "\0\0"), le) N.includes(xe) && (N = N.replace(new RegExp(xe, "g"), le[xe](j))); return xe = function(q) { var me = Array(J(q) + 1); return Z(q, me, 0, me.length), me }(N = N.replace(/\0\0/g, "%")), xe.length > D ? 
0 : (V.set(xe, M >>> 0), xe.length - 1) } var Mt = { a: function(M) { return we(M + 24) + 24 }, m: function(M) { return (M = new Ae(M)).Pb() || (M.Ib(!0), Ge--), M.Jb(!1), Ke.push(M), M.Nb(), M.Qb() }, ia: function(M) { throw E("Unexpected exception thrown, this is not properly supported - aborting"), C = !0, M }, w: function() { ae(0); var M = Ke.pop(); if (M.Xb() && !M.Lb()) { var D = M.Wb(); D && pe(D)(M.Db), Re(M.Db) } Ve = 0 }, d: function() { var M = Ve; if (!M) return $e = 0; var D = new Ae(M); D.Fb(M); var N = D.Eb(); if (!N) return $e = 0, M; for (var j = Array.prototype.slice.call(arguments), U = 0; U < j.length; U++) { var H = j[U]; if (H === 0 || H === N) break; if (ot(H, N, D.zb + 16)) return $e = H, M } return $e = N, M }, k: function() { var M = Ve; if (!M) return $e = 0; var D = new Ae(M); D.Fb(M); var N = D.Eb(); if (!N) return $e = 0, M; for (var j = Array.prototype.slice.call(arguments), U = 0; U < j.length; U++) { var H = j[U]; if (H === 0 || H === N) break; if (ot(H, N, D.zb + 16)) return $e = H, M } return $e = N, M }, g: function() { var M = Ve; if (!M) return $e = 0; var D = new Ae(M); D.Fb(M); var N = D.Eb(); if (!N) return $e = 0, M; for (var j = Array.prototype.slice.call(arguments), U = 0; U < j.length; U++) { var H = j[U]; if (H === 0 || H === N) break; if (ot(H, N, D.zb + 16)) return $e = H, M } return $e = N, M }, s: Re, L: function() { var M = Ke.pop(); M || Me("no exception to throw"); var D = M.Db; throw M.Lb() || (Ke.push(M), M.Jb(!0), M.Ib(!1), Ge++), Ve = D, D }, b: function(M, D, N) { throw new Ae(M).Rb(D, N), Ve = M, Ge++, M }, la: function() { return Ge }, i: function(M) { throw Ve || (Ve = M), M }, H: function() { return 0 }, Ba: function() {}, pa: function() {}, ra: function() {}, ka: function() { return 0 }, za: function() {}, ua: function() {}, ya: function() {}, R: function() {}, qa: function() {}, na: function() {}, Aa: function() {}, oa: function() {}, Ha: function() {}, Ja: function() { Me("To use dlopen, you need enable dynamic linking, see https://github.com/emscripten-core/emscripten/wiki/Linking") }, Ia: function() { Me("To use dlopen, you need enable dynamic linking, see https://github.com/emscripten-core/emscripten/wiki/Linking") }, S: function() { return Date.now() }, Ca: function() { return !0 }, Da: function(M, D) { M = new Date(1e3 * (Y[M >>> 2] + 4294967296 * k[M + 4 >>> 2])), k[D >> 2 >>> 0] = M.getUTCSeconds(), k[D + 4 >> 2 >>> 0] = M.getUTCMinutes(), k[D + 8 >> 2 >>> 0] = M.getUTCHours(), k[D + 12 >> 2 >>> 0] = M.getUTCDate(), k[D + 16 >> 2 >>> 0] = M.getUTCMonth(), k[D + 20 >> 2 >>> 0] = M.getUTCFullYear() - 1900, k[D + 24 >> 2 >>> 0] = M.getUTCDay(), k[D + 28 >> 2 >>> 0] = (M.getTime() - Date.UTC(M.getUTCFullYear(), 0, 1, 0, 0, 0, 0)) / 864e5 | 0 }, Ea: function(M, D) { M = new Date(1e3 * (Y[M >>> 2] + 4294967296 * k[M + 4 >>> 2])), k[D >> 2 >>> 0] = M.getSeconds(), k[D + 4 >> 2 >>> 0] = M.getMinutes(), k[D + 8 >> 2 >>> 0] = M.getHours(), k[D + 12 >> 2 >>> 0] = M.getDate(), k[D + 16 >> 2 >>> 0] = M.getMonth(), k[D + 20 >> 2 >>> 0] = M.getFullYear() - 1900, k[D + 24 >> 2 >>> 0] = M.getDay(); var N = new Date(M.getFullYear(), 0, 1); k[D + 28 >> 2 >>> 0] = (M.getTime() - N.getTime()) / 864e5 | 0, k[D + 36 >> 2 >>> 0] = -60 * M.getTimezoneOffset(); var j = new Date(M.getFullYear(), 6, 1).getTimezoneOffset(); N = N.getTimezoneOffset(), k[D + 32 >> 2 >>> 0] = 0 | (j != N && M.getTimezoneOffset() == Math.min(N, j)) }, Fa: function(M) { var D = new Date(k[M + 20 >> 2 >>> 0] + 1900, k[M + 16 >> 2 >>> 0], k[M + 12 >> 2 >>> 0], k[M + 8 
>> 2 >>> 0], k[M + 4 >> 2 >>> 0], k[M >> 2 >>> 0], 0), N = k[M + 32 >> 2 >>> 0], j = D.getTimezoneOffset(), U = new Date(D.getFullYear(), 0, 1), H = new Date(D.getFullYear(), 6, 1).getTimezoneOffset(), K = U.getTimezoneOffset(), te = Math.min(K, H); return 0 > N ? k[M + 32 >> 2 >>> 0] = +(H != K && te == j) : 0 < N != (te == j) && (H = Math.max(K, H), D.setTime(D.getTime() + 6e4 * ((0 < N ? te : H) - j))), k[M + 24 >> 2 >>> 0] = D.getDay(), k[M + 28 >> 2 >>> 0] = (D.getTime() - U.getTime()) / 864e5 | 0, k[M >> 2 >>> 0] = D.getSeconds(), k[M + 4 >> 2 >>> 0] = D.getMinutes(), k[M + 8 >> 2 >>> 0] = D.getHours(), k[M + 12 >> 2 >>> 0] = D.getDate(), k[M + 16 >> 2 >>> 0] = D.getMonth(), D.getTime() / 1e3 | 0 }, sa: function() { return -52 }, ta: function() {}, Ga: function M(D, N, j) { M.Vb || (M.Vb = !0, function(U, H, K) { function te(Ce) { return (Ce = Ce.toTimeString().match(/\(([A-Za-z ]+)\)$/)) ? Ce[1] : "GMT" } var ee = new Date().getFullYear(), le = new Date(ee, 0, 1), xe = new Date(ee, 6, 1); ee = le.getTimezoneOffset(); var ke = xe.getTimezoneOffset(); k[U >> 2 >>> 0] = 60 * Math.max(ee, ke), k[H >> 2 >>> 0] = +(ee != ke), U = te(le), H = te(xe), U = dt(U), H = dt(H), ke < ee ? (Y[K >> 2 >>> 0] = U, Y[K + 4 >> 2 >>> 0] = H) : (Y[K >> 2 >>> 0] = H, Y[K + 4 >> 2 >>> 0] = U) }(D, N, j)) }, B: function() { Me("") }, ma: function() { return 4294901760 }, I: T ? () => { var M = process.hrtime(); return 1e3 * M[0] + M[1] / 1e6 } : () => performance.now(), xa: function(M, D, N) { R.copyWithin(M >>> 0, D >>> 0, D + N >>> 0) }, G: function(M) { var D = R.length; if (4294901760 < (M >>>= 0)) return !1; for (var N = 1; 4 >= N; N *= 2) { var j = D * (1 + .2 / N); j = Math.min(j, M + 100663296); var U = Math; j = Math.max(M, j), U = U.min.call(U, 4294901760, j + (65536 - j % 65536) % 65536); e: { try { P.grow(U - L.byteLength + 65535 >>> 16), ue(); var H = 1; break e } catch {} H = void 0 } if (H) return !0 } return !1 }, va: function(M, D) { var N = 0; return Et().forEach(function(j, U) { var H = D + N; for (U = Y[M + 4 * U >> 2 >>> 0] = H, H = 0; H < j.length; ++H) V[U++ >> 0 >>> 0] = j.charCodeAt(H); V[U >> 0 >>> 0] = 0, N += j.length + 1 }), 0 }, wa: function(M, D) { var N = Et(); Y[M >> 2 >>> 0] = N.length; var j = 0; return N.forEach(function(U) { j += U.length + 1 }), Y[D >> 2 >>> 0] = j, 0 }, ba: function(M) { v || 0 < Ie || (it(), He(ye), ht(0), ut[1].length && lt(1, 10), ut[2].length && lt(2, 10)), v || 0 < Ie || (s.onExit && s.onExit(M), C = !0), m(M, new ze(M)) }, E: function() { return 52 }, Q: function() { return 52 }, ca: function() { return 70 }, P: function(M, D, N, j) { for (var U = 0, H = 0; H < N; H++) { var K = Y[D >> 2 >>> 0], te = Y[D + 4 >> 2 >>> 0]; D += 8; for (var ee = 0; ee < te; ee++) lt(M, R[K + ee >>> 0]); U += te } return Y[j >> 2 >>> 0] = U, 0 }, c: function() { return $e }, ja: function M(D, N) { M.Mb || (M.Mb = function() { if (typeof crypto == "object" && typeof crypto.getRandomValues == "function") { var U = new Uint8Array(1); return () => (crypto.getRandomValues(U), U[0]) } if (T) try { var H = a(Object(function() { var K = new Error("Cannot find module 'crypto'"); throw K.code = "MODULE_NOT_FOUND", K }())); return () => H.randomBytes(1)[0] } catch {} return () => Me("randomDevice") }()); for (var j = 0; j < N; j++) V[D + j >> 0 >>> 0] = M.Mb(); return 0 }, ea: function(M, D, N) { var j = ie(); try { return pe(M)(D, N) } catch (U) { if (oe(j), U !== U + 0) throw U; ae(1, 0) } }, fa: function(M, D, N) { var j = ie(); try { return pe(M)(D, N) } catch (U) { if 
(oe(j), U !== U + 0) throw U; ae(1, 0) } }, J: function(M) { var D = ie(); try { return pe(M)() } catch (N) { if (oe(D), N !== N + 0) throw N; ae(1, 0) } }, e: function(M, D) { var N = ie(); try { return pe(M)(D) } catch (j) { if (oe(N), j !== j + 0) throw j; ae(1, 0) } }, N: function(M, D, N) { var j = ie(); try { return pe(M)(D, N) } catch (U) { if (oe(j), U !== U + 0) throw U; ae(1, 0) } }, O: function(M, D, N) { var j = ie(); try { return pe(M)(D, N) } catch (U) { if (oe(j), U !== U + 0) throw U; ae(1, 0) } }, j: function(M, D, N) { var j = ie(); try { return pe(M)(D, N) } catch (U) { if (oe(j), U !== U + 0) throw U; ae(1, 0) } }, o: function(M, D, N, j) { var U = ie(); try { return pe(M)(D, N, j) } catch (H) { if (oe(U), H !== H + 0) throw H; ae(1, 0) } }, p: function(M, D, N, j, U) { var H = ie(); try { return pe(M)(D, N, j, U) } catch (K) { if (oe(H), K !== K + 0) throw K; ae(1, 0) } }, M: function(M, D, N, j, U, H) { var K = ie(); try { return pe(M)(D, N, j, U, H) } catch (te) { if (oe(K), te !== te + 0) throw te; ae(1, 0) } }, r: function(M, D, N, j, U, H) { var K = ie(); try { return pe(M)(D, N, j, U, H) } catch (te) { if (oe(K), te !== te + 0) throw te; ae(1, 0) } }, v: function(M, D, N, j, U, H, K) { var te = ie(); try { return pe(M)(D, N, j, U, H, K) } catch (ee) { if (oe(te), ee !== ee + 0) throw ee; ae(1, 0) } }, K: function(M, D, N, j, U, H, K, te) { var ee = ie(); try { return pe(M)(D, N, j, U, H, K, te) } catch (le) { if (oe(ee), le !== le + 0) throw le; ae(1, 0) } }, D: function(M, D, N, j, U, H, K, te, ee, le, xe, ke) { var Ce = ie(); try { return pe(M)(D, N, j, U, H, K, te, ee, le, xe, ke) } catch (q) { if (oe(Ce), q !== q + 0) throw q; ae(1, 0) } }, X: function(M, D, N, j, U, H, K, te) { var ee = ie(); try { return xt(M, D, N, j, U, H, K, te) } catch (le) { if (oe(ee), le !== le + 0) throw le; ae(1, 0) } }, V: function(M, D, N, j, U, H, K) { var te = ie(); try { return mt(M, D, N, j, U, H, K) } catch (ee) { if (oe(te), ee !== ee + 0) throw ee; ae(1, 0) } }, U: function(M, D, N, j, U) { var H = ie(); try { return St(M, D, N, j, U) } catch (K) { if (oe(H), K !== K + 0) throw K; ae(1, 0) } }, Z: function(M, D, N, j) { var U = ie(); try { return Tt(M, D, N, j) } catch (H) { if (oe(U), H !== H + 0) throw H; ae(1, 0) } }, W: function(M) { var D = ie(); try { return gt(M) } catch (N) { if (oe(D), N !== N + 0) throw N; ae(1, 0) } }, Y: function(M, D) { var N = ie(); try { return vt(M, D) } catch (j) { if (oe(N), j !== j + 0) throw j; ae(1, 0) } }, T: function(M, D, N) { var j = ie(); try { return bt(M, D, N) } catch (U) { if (oe(j), U !== U + 0) throw U; ae(1, 0) } }, f: function(M) { var D = ie(); try { pe(M)() } catch (N) { if (oe(D), N !== N + 0) throw N; ae(1, 0) } }, q: function(M, D) { var N = ie(); try { pe(M)(D) } catch (j) { if (oe(N), j !== j + 0) throw j; ae(1, 0) } }, h: function(M, D, N) { var j = ie(); try { pe(M)(D, N) } catch (U) { if (oe(j), U !== U + 0) throw U; ae(1, 0) } }, da: function(M, D, N, j) { var U = ie(); try { pe(M)(D, N, j) } catch (H) { if (oe(U), H !== H + 0) throw H; ae(1, 0) } }, l: function(M, D, N, j) { var U = ie(); try { pe(M)(D, N, j) } catch (H) { if (oe(U), H !== H + 0) throw H; ae(1, 0) } }, t: function(M, D, N, j, U) { var H = ie(); try { pe(M)(D, N, j, U) } catch (K) { if (oe(H), K !== K + 0) throw K; ae(1, 0) } }, u: function(M, D, N, j, U, H) { var K = ie(); try { pe(M)(D, N, j, U, H) } catch (te) { if (oe(K), te !== te + 0) throw te; ae(1, 0) } }, x: function(M, D, N, j, U, H, K) { var te = ie(); try { pe(M)(D, N, j, U, H, K) } 
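/* The single-letter functions in this import object are Emscripten invoke_* wrappers: each saves the stack pointer (ie), calls through the indirect function table (pe), and if a foreign exception escapes it restores the stack (oe) and records the throw via ae(1, 0). */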
catch (ee) { if (oe(te), ee !== ee + 0) throw ee; ae(1, 0) } }, z: function(M, D, N, j, U, H, K, te) { var ee = ie(); try { pe(M)(D, N, j, U, H, K, te) } catch (le) { if (oe(ee), le !== le + 0) throw le; ae(1, 0) } }, ga: function(M, D, N, j, U, H, K, te, ee) { var le = ie(); try { pe(M)(D, N, j, U, H, K, te, ee) } catch (xe) { if (oe(le), xe !== xe + 0) throw xe; ae(1, 0) } }, A: function(M, D, N, j, U, H, K, te, ee, le, xe) { var ke = ie(); try { pe(M)(D, N, j, U, H, K, te, ee, le, xe) } catch (Ce) { if (oe(ke), Ce !== Ce + 0) throw Ce; ae(1, 0) } }, C: function(M, D, N, j, U, H, K, te, ee, le, xe, ke, Ce, q, me, Ee) { var qe = ie(); try { pe(M)(D, N, j, U, H, K, te, ee, le, xe, ke, Ce, q, me, Ee) } catch (Ze) { if (oe(qe), Ze !== Ze + 0) throw Ze; ae(1, 0) } }, aa: function(M, D, N, j, U, H, K, te) { var ee = ie(); try { _t(M, D, N, j, U, H, K, te) } catch (le) { if (oe(ee), le !== le + 0) throw le; ae(1, 0) } }, _: function(M, D, N, j, U, H, K, te, ee, le, xe, ke) { var Ce = ie(); try { wt(M, D, N, j, U, H, K, te, ee, le, xe, ke) } catch (q) { if (oe(Ce), q !== q + 0) throw q; ae(1, 0) } }, $: function(M, D, N, j, U, H) { var K = ie(); try { yt(M, D, N, j, U, H) } catch (te) { if (oe(K), te !== te + 0) throw te; ae(1, 0) } }, n: function(M) { return M }, F: function(M) { $e = M }, ha: At, y: function(M, D, N, j) { return At(M, D, N, j) } }; (function() { function M(U) { s.asm = U.exports, P = s.asm.Ka, ue(), Se = s.asm.ib, se.unshift(s.asm.La), Ne--, s.monitorRunDependencies && s.monitorRunDependencies(Ne), Ne == 0 && Fe && (U = Fe, Fe = null, U()) } function D(U) { M(U.instance) } function N(U) { return function() { if (!S && (_ || y)) { if (typeof fetch == "function" && !ve.startsWith("file://")) return fetch(ve, { credentials: "same-origin" }).then(function(H) { if (!H.ok) throw "failed to load wasm binary file at '" + ve + "'"; return H.arrayBuffer() }).catch(function() { return Ue() }); if (o) return new Promise(function(H, K) { o(ve, function(te) { H(new Uint8Array(te)) }, K) }) } return Promise.resolve().then(function() { return Ue() }) }().then(function(H) { return WebAssembly.instantiate(H, j) }).then(function(H) { return H }).then(U, function(H) { E("failed to asynchronously prepare wasm: " + H), Me(H) }) } var j = { a: Mt }; if (Ne++, s.monitorRunDependencies && s.monitorRunDependencies(Ne), s.instantiateWasm) try { return s.instantiateWasm(j, M) } catch (U) { return E("Module.instantiateWasm callback failed with error: " + U), !1 }(S || typeof WebAssembly.instantiateStreaming != "function" || Oe() || ve.startsWith("file://") || T || typeof fetch != "function" ? 
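/* Non-streaming fallback: this branch is taken when a wasm binary was supplied inline, WebAssembly.instantiateStreaming or fetch is unavailable, the URL is a data: or file: URL, or the code is running under Node; otherwise the else branch streams-compiles the module and still falls back to N(D) if that fails. */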
N(D) : fetch(ve, { credentials: "same-origin" }).then(function(U) { return WebAssembly.instantiateStreaming(U, j).then(D, function(H) { return E("wasm streaming compile failed: " + H), E("falling back to ArrayBuffer instantiation"), N(D) }) })).catch(p) })(), s.___wasm_call_ctors = function() { return (s.___wasm_call_ctors = s.asm.La).apply(null, arguments) }, s._OrtInit = function() { return (s._OrtInit = s.asm.Ma).apply(null, arguments) }, s._OrtCreateSessionOptions = function() { return (s._OrtCreateSessionOptions = s.asm.Na).apply(null, arguments) }, s._OrtAppendExecutionProvider = function() { return (s._OrtAppendExecutionProvider = s.asm.Oa).apply(null, arguments) }, s._OrtAddSessionConfigEntry = function() { return (s._OrtAddSessionConfigEntry = s.asm.Pa).apply(null, arguments) }, s._OrtReleaseSessionOptions = function() { return (s._OrtReleaseSessionOptions = s.asm.Qa).apply(null, arguments) }, s._OrtCreateSession = function() { return (s._OrtCreateSession = s.asm.Ra).apply(null, arguments) }, s._OrtReleaseSession = function() { return (s._OrtReleaseSession = s.asm.Sa).apply(null, arguments) }, s._OrtGetInputCount = function() { return (s._OrtGetInputCount = s.asm.Ta).apply(null, arguments) }, s._OrtGetOutputCount = function() { return (s._OrtGetOutputCount = s.asm.Ua).apply(null, arguments) }, s._OrtGetInputName = function() { return (s._OrtGetInputName = s.asm.Va).apply(null, arguments) }, s._OrtGetOutputName = function() { return (s._OrtGetOutputName = s.asm.Wa).apply(null, arguments) }, s._OrtFree = function() { return (s._OrtFree = s.asm.Xa).apply(null, arguments) }, s._OrtCreateTensor = function() { return (s._OrtCreateTensor = s.asm.Ya).apply(null, arguments) }, s._OrtGetTensorData = function() { return (s._OrtGetTensorData = s.asm.Za).apply(null, arguments) }, s._OrtReleaseTensor = function() { return (s._OrtReleaseTensor = s.asm._a).apply(null, arguments) }, s._OrtCreateRunOptions = function() { return (s._OrtCreateRunOptions = s.asm.$a).apply(null, arguments) }, s._OrtAddRunConfigEntry = function() { return (s._OrtAddRunConfigEntry = s.asm.ab).apply(null, arguments) }, s._OrtReleaseRunOptions = function() { return (s._OrtReleaseRunOptions = s.asm.bb).apply(null, arguments) }, s._OrtRun = function() { return (s._OrtRun = s.asm.cb).apply(null, arguments) }, s._OrtEndProfiling = function() { return (s._OrtEndProfiling = s.asm.db).apply(null, arguments) }; var Je, we = s._malloc = function() { return (we = s._malloc = s.asm.eb).apply(null, arguments) }, rt = s._free = function() { return (rt = s._free = s.asm.fb).apply(null, arguments) }, ht = s._fflush = function() { return (ht = s._fflush = s.asm.gb).apply(null, arguments) }, it = s.___funcs_on_exit = function() { return (it = s.___funcs_on_exit = s.asm.hb).apply(null, arguments) }, ae = s._setThrew = function() { return (ae = s._setThrew = s.asm.jb).apply(null, arguments) }, ie = s.stackSave = function() { return (ie = s.stackSave = s.asm.kb).apply(null, arguments) }, oe = s.stackRestore = function() { return (oe = s.stackRestore = s.asm.lb).apply(null, arguments) }, ft = s.stackAlloc = function() { return (ft = s.stackAlloc = s.asm.mb).apply(null, arguments) }, ot = s.___cxa_can_catch = function() { return (ot = s.___cxa_can_catch = s.asm.nb).apply(null, arguments) }, pt = s.___cxa_is_pointer_type = function() { return (pt = s.___cxa_is_pointer_type = s.asm.ob).apply(null, arguments) }, gt = s.dynCall_j = function() { return (gt = s.dynCall_j = s.asm.pb).apply(null, arguments) }, mt = s.dynCall_iiiiij = function() { 
return (mt = s.dynCall_iiiiij = s.asm.qb).apply(null, arguments) }, bt = s.dynCall_jii = function() { return (bt = s.dynCall_jii = s.asm.rb).apply(null, arguments) }, _t = s.dynCall_viiiiij = function() { return (_t = s.dynCall_viiiiij = s.asm.sb).apply(null, arguments) }, yt = s.dynCall_vjji = function() { return (yt = s.dynCall_vjji = s.asm.tb).apply(null, arguments) }, wt = s.dynCall_viiijjjii = function() { return (wt = s.dynCall_viiijjjii = s.asm.ub).apply(null, arguments) }, Tt = s.dynCall_iij = function() { return (Tt = s.dynCall_iij = s.asm.vb).apply(null, arguments) }, vt = s.dynCall_ji = function() { return (vt = s.dynCall_ji = s.asm.wb).apply(null, arguments) }, xt = s.dynCall_iiiiiij = function() { return (xt = s.dynCall_iiiiiij = s.asm.xb).apply(null, arguments) }, St = s.dynCall_iiij = function() { return (St = s.dynCall_iiij = s.asm.yb).apply(null, arguments) }; function Ot() { function M() { if (!Je && (Je = !0, s.calledRun = !0, !C)) { if (He(se), h(s), s.onRuntimeInitialized && s.onRuntimeInitialized(), s.postRun) for (typeof s.postRun == "function" && (s.postRun = [s.postRun]); s.postRun.length;) { var D = s.postRun.shift(); be.unshift(D) } He(be) } } if (!(0 < Ne)) { if (s.preRun) for (typeof s.preRun == "function" && (s.preRun = [s.preRun]); s.preRun.length;) Le(); He(Te), 0 < Ne || (s.setStatus ? (s.setStatus("Running..."), setTimeout(function() { setTimeout(function() { s.setStatus("") }, 1), M() }, 1)) : M()) } } if (s.UTF8ToString = z, s.stringToUTF8 = function(M, D, N) { return Z(M, R, D, N) }, s.lengthBytesUTF8 = J, s.stackSave = ie, s.stackRestore = oe, s.stackAlloc = ft, Fe = function M() { Je || Ot(), Je || (Fe = M) }, s.preInit) for (typeof s.preInit == "function" && (s.preInit = [s.preInit]); 0 < s.preInit.length;) s.preInit.pop()(); return Ot(), f.ready }); b.exports = c }, 4537: b => { b.exports = function(n, a) { for (var u = new Array(arguments.length - 1), c = 0, f = 2, s = !0; f < arguments.length;) u[c++] = arguments[f++]; return new Promise(function(h, p) { u[c] = function(l) { if (s) if (s = !1, l) p(l); else { for (var o = new Array(arguments.length - 1), t = 0; t < o.length;) o[t++] = arguments[t]; h.apply(null, o) } }; try { n.apply(a || null, u) } catch (l) { s && (s = !1, p(l)) } }) } }, 7419: (b, n) => { var a = n; a.length = function(h) { var p = h.length; if (!p) return 0; for (var l = 0; --p % 4 > 1 && h.charAt(p) === "=";) ++l; return Math.ceil(3 * h.length) / 4 - l }; for (var u = new Array(64), c = new Array(123), f = 0; f < 64;) c[u[f] = f < 26 ? f + 65 : f < 52 ? f + 71 : f < 62 ? f - 4 : f - 59 | 43] = f++; a.encode = function(h, p, l) { for (var o, t = null, e = [], r = 0, i = 0; p < l;) { var d = h[p++]; switch (i) { case 0: e[r++] = u[d >> 2], o = (3 & d) << 4, i = 1; break; case 1: e[r++] = u[o | d >> 4], o = (15 & d) << 2, i = 2; break; case 2: e[r++] = u[o | d >> 6], e[r++] = u[63 & d], i = 0 } r > 8191 && ((t || (t = [])).push(String.fromCharCode.apply(String, e)), r = 0) } return i && (e[r++] = u[o], e[r++] = 61, i === 1 && (e[r++] = 61)), t ? 
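/* Module 7419 looks like a protobuf.js-style base64 codec; this branch of encode() joins the chunked output accumulated whenever more than 8191 characters were buffered, while the else branch below returns the single buffered chunk. */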
(r && t.push(String.fromCharCode.apply(String, e.slice(0, r))), t.join("")) : String.fromCharCode.apply(String, e.slice(0, r)) }; var s = "invalid encoding"; a.decode = function(h, p, l) { for (var o, t = l, e = 0, r = 0; r < h.length;) { var i = h.charCodeAt(r++); if (i === 61 && e > 1) break; if ((i = c[i]) === void 0) throw Error(s); switch (e) { case 0: o = i, e = 1; break; case 1: p[l++] = o << 2 | (48 & i) >> 4, o = i, e = 2; break; case 2: p[l++] = (15 & o) << 4 | (60 & i) >> 2, o = i, e = 3; break; case 3: p[l++] = (3 & o) << 6 | i, e = 0 } } if (e === 1) throw Error(s); return l - t }, a.test = function(h) { return /^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/.test(h) } }, 9211: b => { function n() { this._listeners = {} } b.exports = n, n.prototype.on = function(a, u, c) { return (this._listeners[a] || (this._listeners[a] = [])).push({ fn: u, ctx: c || this }), this }, n.prototype.off = function(a, u) { if (a === void 0) this._listeners = {}; else if (u === void 0) this._listeners[a] = []; else for (var c = this._listeners[a], f = 0; f < c.length;) c[f].fn === u ? c.splice(f, 1) : ++f; return this }, n.prototype.emit = function(a) { var u = this._listeners[a]; if (u) { for (var c = [], f = 1; f < arguments.length;) c.push(arguments[f++]); for (f = 0; f < u.length;) u[f].fn.apply(u[f++].ctx, c) } return this } }, 945: b => { function n(s) { return typeof Float32Array < "u" ? function() { var h = new Float32Array([-0]), p = new Uint8Array(h.buffer), l = p[3] === 128; function o(i, d, g) { h[0] = i, d[g] = p[0], d[g + 1] = p[1], d[g + 2] = p[2], d[g + 3] = p[3] } function t(i, d, g) { h[0] = i, d[g] = p[3], d[g + 1] = p[2], d[g + 2] = p[1], d[g + 3] = p[0] } function e(i, d) { return p[0] = i[d], p[1] = i[d + 1], p[2] = i[d + 2], p[3] = i[d + 3], h[0] } function r(i, d) { return p[3] = i[d], p[2] = i[d + 1], p[1] = i[d + 2], p[0] = i[d + 3], h[0] } s.writeFloatLE = l ? o : t, s.writeFloatBE = l ? t : o, s.readFloatLE = l ? e : r, s.readFloatBE = l ? r : e }() : function() { function h(l, o, t, e) { var r = o < 0 ? 1 : 0; if (r && (o = -o), o === 0) l(1 / o > 0 ? 0 : 2147483648, t, e); else if (isNaN(o)) l(2143289344, t, e); else if (o > 34028234663852886e22) l((r << 31 | 2139095040) >>> 0, t, e); else if (o < 11754943508222875e-54) l((r << 31 | Math.round(o / 1401298464324817e-60)) >>> 0, t, e); else { var i = Math.floor(Math.log(o) / Math.LN2); l((r << 31 | i + 127 << 23 | 8388607 & Math.round(o * Math.pow(2, -i) * 8388608)) >>> 0, t, e) } } function p(l, o, t) { var e = l(o, t), r = 2 * (e >> 31) + 1, i = e >>> 23 & 255, d = 8388607 & e; return i === 255 ? d ? NaN : r * (1 / 0) : i === 0 ? 1401298464324817e-60 * r * d : r * Math.pow(2, i - 150) * (d + 8388608) } s.writeFloatLE = h.bind(null, a), s.writeFloatBE = h.bind(null, u), s.readFloatLE = p.bind(null, c), s.readFloatBE = p.bind(null, f) }(), typeof Float64Array < "u" ? 
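/* Float64 support (protobuf.js-style float helpers, assumed): when Float64Array exists, byte order is detected once through a shared buffer view and the LE/BE writers and readers are plain byte copies; the later fallback packs and unpacks IEEE-754 doubles manually. */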
function() { var h = new Float64Array([-0]), p = new Uint8Array(h.buffer), l = p[7] === 128; function o(i, d, g) { h[0] = i, d[g] = p[0], d[g + 1] = p[1], d[g + 2] = p[2], d[g + 3] = p[3], d[g + 4] = p[4], d[g + 5] = p[5], d[g + 6] = p[6], d[g + 7] = p[7] } function t(i, d, g) { h[0] = i, d[g] = p[7], d[g + 1] = p[6], d[g + 2] = p[5], d[g + 3] = p[4], d[g + 4] = p[3], d[g + 5] = p[2], d[g + 6] = p[1], d[g + 7] = p[0] } function e(i, d) { return p[0] = i[d], p[1] = i[d + 1], p[2] = i[d + 2], p[3] = i[d + 3], p[4] = i[d + 4], p[5] = i[d + 5], p[6] = i[d + 6], p[7] = i[d + 7], h[0] } function r(i, d) { return p[7] = i[d], p[6] = i[d + 1], p[5] = i[d + 2], p[4] = i[d + 3], p[3] = i[d + 4], p[2] = i[d + 5], p[1] = i[d + 6], p[0] = i[d + 7], h[0] } s.writeDoubleLE = l ? o : t, s.writeDoubleBE = l ? t : o, s.readDoubleLE = l ? e : r, s.readDoubleBE = l ? r : e }() : function() { function h(l, o, t, e, r, i) { var d = e < 0 ? 1 : 0; if (d && (e = -e), e === 0) l(0, r, i + o), l(1 / e > 0 ? 0 : 2147483648, r, i + t); else if (isNaN(e)) l(0, r, i + o), l(2146959360, r, i + t); else if (e > 17976931348623157e292) l(0, r, i + o), l((d << 31 | 2146435072) >>> 0, r, i + t); else { var g; if (e < 22250738585072014e-324) l((g = e / 5e-324) >>> 0, r, i + o), l((d << 31 | g / 4294967296) >>> 0, r, i + t); else { var m = Math.floor(Math.log(e) / Math.LN2); m === 1024 && (m = 1023), l(4503599627370496 * (g = e * Math.pow(2, -m)) >>> 0, r, i + o), l((d << 31 | m + 1023 << 20 | 1048576 * g & 1048575) >>> 0, r, i + t) } } } function p(l, o, t, e, r) { var i = l(e, r + o), d = l(e, r + t), g = 2 * (d >> 31) + 1, m = d >>> 20 & 2047, _ = 4294967296 * (1048575 & d) + i; return m === 2047 ? _ ? NaN : g * (1 / 0) : m === 0 ? 5e-324 * g * _ : g * Math.pow(2, m - 1075) * (_ + 4503599627370496) } s.writeDoubleLE = h.bind(null, a, 0, 4), s.writeDoubleBE = h.bind(null, u, 4, 0), s.readDoubleLE = p.bind(null, c, 0, 4), s.readDoubleBE = p.bind(null, f, 4, 0) }(), s } function a(s, h, p) { h[p] = 255 & s, h[p + 1] = s >>> 8 & 255, h[p + 2] = s >>> 16 & 255, h[p + 3] = s >>> 24 } function u(s, h, p) { h[p] = s >>> 24, h[p + 1] = s >>> 16 & 255, h[p + 2] = s >>> 8 & 255, h[p + 3] = 255 & s } function c(s, h) { return (s[h] | s[h + 1] << 8 | s[h + 2] << 16 | s[h + 3] << 24) >>> 0 } function f(s, h) { return (s[h] << 24 | s[h + 1] << 16 | s[h + 2] << 8 | s[h + 3]) >>> 0 } b.exports = n(n) }, 7199: module => { function inquire(moduleName) { try { var mod = eval("quire".replace(/^/, "re"))(moduleName); if (mod && (mod.length || Object.keys(mod).length)) return mod } catch (b) {} return null } module.exports = inquire }, 6662: b => { b.exports = function(n, a, u) { var c = u || 8192, f = c >>> 1, s = null, h = c; return function(p) { if (p < 1 || p > f) return n(p); h + p > c && (s = n(c), h = 0); var l = a.call(s, h, h += p); return 7 & h && (h = 1 + (7 | h)), l } } }, 4997: (b, n) => { var a = n; a.length = function(u) { for (var c = 0, f = 0, s = 0; s < u.length; ++s)(f = u.charCodeAt(s)) < 128 ? c += 1 : f < 2048 ? c += 2 : (64512 & f) == 55296 && (64512 & u.charCodeAt(s + 1)) == 56320 ? (++s, c += 4) : c += 3; return c }, a.read = function(u, c, f) { if (f - c < 1) return ""; for (var s, h = null, p = [], l = 0; c < f;)(s = u[c++]) < 128 ? p[l++] = s : s > 191 && s < 224 ? p[l++] = (31 & s) << 6 | 63 & u[c++] : s > 239 && s < 365 ? 
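/* utf8.read, 4-byte branch: sequences whose lead byte is above 0xEF decode to a code point beyond U+FFFF, which is emitted as a surrogate pair (0xD800 + high ten bits, 0xDC00 + low ten bits). */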
(s = ((7 & s) << 18 | (63 & u[c++]) << 12 | (63 & u[c++]) << 6 | 63 & u[c++]) - 65536, p[l++] = 55296 + (s >> 10), p[l++] = 56320 + (1023 & s)) : p[l++] = (15 & s) << 12 | (63 & u[c++]) << 6 | 63 & u[c++], l > 8191 && ((h || (h = [])).push(String.fromCharCode.apply(String, p)), l = 0); return h ? (l && h.push(String.fromCharCode.apply(String, p.slice(0, l))), h.join("")) : String.fromCharCode.apply(String, p.slice(0, l)) }, a.write = function(u, c, f) { for (var s, h, p = f, l = 0; l < u.length; ++l)(s = u.charCodeAt(l)) < 128 ? c[f++] = s : s < 2048 ? (c[f++] = s >> 6 | 192, c[f++] = 63 & s | 128) : (64512 & s) == 55296 && (64512 & (h = u.charCodeAt(l + 1))) == 56320 ? (s = 65536 + ((1023 & s) << 10) + (1023 & h), ++l, c[f++] = s >> 18 | 240, c[f++] = s >> 12 & 63 | 128, c[f++] = s >> 6 & 63 | 128, c[f++] = 63 & s | 128) : (c[f++] = s >> 12 | 224, c[f++] = s >> 6 & 63 | 128, c[f++] = 63 & s | 128); return f - p } }, 3442: (b, n) => { n.__esModule = !0; var a = function() { function u(c) { if (!c) throw new TypeError("Invalid argument; `value` has no value."); this.value = u.EMPTY, c && u.isGuid(c) && (this.value = c) } return u.isGuid = function(c) { var f = c.toString(); return c && (c instanceof u || u.validator.test(f)) }, u.create = function() { return new u([u.gen(2), u.gen(1), u.gen(1), u.gen(1), u.gen(3)].join("-")) }, u.createEmpty = function() { return new u("emptyguid") }, u.parse = function(c) { return new u(c) }, u.raw = function() { return [u.gen(2), u.gen(1), u.gen(1), u.gen(1), u.gen(3)].join("-") }, u.gen = function(c) { for (var f = "", s = 0; s < c; s++) f += (65536 * (1 + Math.random()) | 0).toString(16).substring(1); return f }, u.prototype.equals = function(c) { return u.isGuid(c) && this.value === c.toString() }, u.prototype.isEmpty = function() { return this.value === u.EMPTY }, u.prototype.toString = function() { return this.value }, u.prototype.toJSON = function() { return { value: this.value } }, u.validator = new RegExp("^[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$", "i"), u.EMPTY = "00000000-0000-0000-0000-000000000000", u }(); n.Guid = a }, 3720: b => { b.exports = a; var n = null; try { n = new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([0, 97, 115, 109, 1, 0, 0, 0, 1, 13, 2, 96, 0, 1, 127, 96, 4, 127, 127, 127, 127, 1, 127, 3, 7, 6, 0, 1, 1, 1, 1, 1, 6, 6, 1, 127, 1, 65, 0, 11, 7, 50, 6, 3, 109, 117, 108, 0, 1, 5, 100, 105, 118, 95, 115, 0, 2, 5, 100, 105, 118, 95, 117, 0, 3, 5, 114, 101, 109, 95, 115, 0, 4, 5, 114, 101, 109, 95, 117, 0, 5, 8, 103, 101, 116, 95, 104, 105, 103, 104, 0, 0, 10, 191, 1, 6, 4, 0, 35, 0, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 126, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 127, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 128, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 129, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 130, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11])), {}).exports } catch {} function a(v, P, L) { this.low = 0 | v, this.high = 0 | P, this.unsigned = !!L } function u(v) { return (v && v.__isLong__) === !0 } 
a.prototype.__isLong__, Object.defineProperty(a.prototype, "__isLong__", { value: !0 }), a.isLong = u; var c = {}, f = {}; function s(v, P) { var L, V, R; return P ? (R = 0 <= (v >>>= 0) && v < 256) && (V = f[v]) ? V : (L = p(v, (0 | v) < 0 ? -1 : 0, !0), R && (f[v] = L), L) : (R = -128 <= (v |= 0) && v < 128) && (V = c[v]) ? V : (L = p(v, v < 0 ? -1 : 0, !1), R && (c[v] = L), L) } function h(v, P) { if (isNaN(v)) return P ? m : g; if (P) { if (v < 0) return m; if (v >= r) return S } else { if (v <= -i) return O; if (v + 1 >= i) return w } return v < 0 ? h(-v, P).neg() : p(v % e | 0, v / e | 0, P) } function p(v, P, L) { return new a(v, P, L) } a.fromInt = s, a.fromNumber = h, a.fromBits = p; var l = Math.pow; function o(v, P, L) { if (v.length === 0) throw Error("empty string"); if (v === "NaN" || v === "Infinity" || v === "+Infinity" || v === "-Infinity") return g; if (typeof P == "number" ? (L = P, P = !1) : P = !!P, (L = L || 10) < 2 || 36 < L) throw RangeError("radix"); var V; if ((V = v.indexOf("-")) > 0) throw Error("interior hyphen"); if (V === 0) return o(v.substring(1), P, L).neg(); for (var R = h(l(L, 8)), k = g, Y = 0; Y < v.length; Y += 8) { var C = Math.min(8, v.length - Y), $ = parseInt(v.substring(Y, Y + C), L); if (C < 8) { var X = h(l(L, C)); k = k.mul(X).add(h($)) } else k = (k = k.mul(R)).add(h($)) } return k.unsigned = P, k } function t(v, P) { return typeof v == "number" ? h(v, P) : typeof v == "string" ? o(v, P) : p(v.low, v.high, typeof P == "boolean" ? P : v.unsigned) } a.fromString = o, a.fromValue = t; var e = 4294967296, r = e * e, i = r / 2, d = s(1 << 24), g = s(0); a.ZERO = g; var m = s(0, !0); a.UZERO = m; var _ = s(1); a.ONE = _; var y = s(1, !0); a.UONE = y; var T = s(-1); a.NEG_ONE = T; var w = p(-1, 2147483647, !1); a.MAX_VALUE = w; var S = p(-1, -1, !0); a.MAX_UNSIGNED_VALUE = S; var O = p(0, -2147483648, !1); a.MIN_VALUE = O; var E = a.prototype; E.toInt = function() { return this.unsigned ? this.low >>> 0 : this.low }, E.toNumber = function() { return this.unsigned ? (this.high >>> 0) * e + (this.low >>> 0) : this.high * e + (this.low >>> 0) }, E.toString = function(v) { if ((v = v || 10) < 2 || 36 < v) throw RangeError("radix"); if (this.isZero()) return "0"; if (this.isNegative()) { if (this.eq(O)) { var P = h(v), L = this.div(P), V = L.mul(P).sub(this); return L.toString(v) + V.toInt().toString(v) } return "-" + this.neg().toString(v) } for (var R = h(l(v, 6), this.unsigned), k = this, Y = "";;) { var C = k.div(R), $ = (k.sub(C.mul(R)).toInt() >>> 0).toString(v); if ((k = C).isZero()) return $ + Y; for (; $.length < 6;) $ = "0" + $; Y = "" + $ + Y } }, E.getHighBits = function() { return this.high }, E.getHighBitsUnsigned = function() { return this.high >>> 0 }, E.getLowBits = function() { return this.low }, E.getLowBitsUnsigned = function() { return this.low >>> 0 }, E.getNumBitsAbs = function() { if (this.isNegative()) return this.eq(O) ? 64 : this.neg().getNumBitsAbs(); for (var v = this.high != 0 ? this.high : this.low, P = 31; P > 0 && !(v & 1 << P); P--); return this.high != 0 ? 
P + 33 : P + 1 }, E.isZero = function() { return this.high === 0 && this.low === 0 }, E.eqz = E.isZero, E.isNegative = function() { return !this.unsigned && this.high < 0 }, E.isPositive = function() { return this.unsigned || this.high >= 0 }, E.isOdd = function() { return (1 & this.low) == 1 }, E.isEven = function() { return (1 & this.low) == 0 }, E.equals = function(v) { return u(v) || (v = t(v)), (this.unsigned === v.unsigned || this.high >>> 31 != 1 || v.high >>> 31 != 1) && this.high === v.high && this.low === v.low }, E.eq = E.equals, E.notEquals = function(v) { return !this.eq(v) }, E.neq = E.notEquals, E.ne = E.notEquals, E.lessThan = function(v) { return this.comp(v) < 0 }, E.lt = E.lessThan, E.lessThanOrEqual = function(v) { return this.comp(v) <= 0 }, E.lte = E.lessThanOrEqual, E.le = E.lessThanOrEqual, E.greaterThan = function(v) { return this.comp(v) > 0 }, E.gt = E.greaterThan, E.greaterThanOrEqual = function(v) { return this.comp(v) >= 0 }, E.gte = E.greaterThanOrEqual, E.ge = E.greaterThanOrEqual, E.compare = function(v) { if (u(v) || (v = t(v)), this.eq(v)) return 0; var P = this.isNegative(), L = v.isNegative(); return P && !L ? -1 : !P && L ? 1 : this.unsigned ? v.high >>> 0 > this.high >>> 0 || v.high === this.high && v.low >>> 0 > this.low >>> 0 ? -1 : 1 : this.sub(v).isNegative() ? -1 : 1 }, E.comp = E.compare, E.negate = function() { return !this.unsigned && this.eq(O) ? O : this.not().add(_) }, E.neg = E.negate, E.add = function(v) { u(v) || (v = t(v)); var P = this.high >>> 16, L = 65535 & this.high, V = this.low >>> 16, R = 65535 & this.low, k = v.high >>> 16, Y = 65535 & v.high, C = v.low >>> 16, $ = 0, X = 0, z = 0, Z = 0; return z += (Z += R + (65535 & v.low)) >>> 16, X += (z += V + C) >>> 16, $ += (X += L + Y) >>> 16, $ += P + k, p((z &= 65535) << 16 | (Z &= 65535), ($ &= 65535) << 16 | (X &= 65535), this.unsigned) }, E.subtract = function(v) { return u(v) || (v = t(v)), this.add(v.neg()) }, E.sub = E.subtract, E.multiply = function(v) { if (this.isZero()) return g; if (u(v) || (v = t(v)), n) return p(n.mul(this.low, this.high, v.low, v.high), n.get_high(), this.unsigned); if (v.isZero()) return g; if (this.eq(O)) return v.isOdd() ? O : g; if (v.eq(O)) return this.isOdd() ? O : g; if (this.isNegative()) return v.isNegative() ? this.neg().mul(v.neg()) : this.neg().mul(v).neg(); if (v.isNegative()) return this.mul(v.neg()).neg(); if (this.lt(d) && v.lt(d)) return h(this.toNumber() * v.toNumber(), this.unsigned); var P = this.high >>> 16, L = 65535 & this.high, V = this.low >>> 16, R = 65535 & this.low, k = v.high >>> 16, Y = 65535 & v.high, C = v.low >>> 16, $ = 65535 & v.low, X = 0, z = 0, Z = 0, J = 0; return Z += (J += R * $) >>> 16, z += (Z += V * $) >>> 16, Z &= 65535, z += (Z += R * C) >>> 16, X += (z += L * $) >>> 16, z &= 65535, X += (z += V * C) >>> 16, z &= 65535, X += (z += R * Y) >>> 16, X += P * $ + L * C + V * Y + R * k, p((Z &= 65535) << 16 | (J &= 65535), (X &= 65535) << 16 | (z &= 65535), this.unsigned) }, E.mul = E.multiply, E.divide = function(v) { if (u(v) || (v = t(v)), v.isZero()) throw Error("division by zero"); var P, L, V; if (n) return this.unsigned || this.high !== -2147483648 || v.low !== -1 || v.high !== -1 ? p((this.unsigned ? n.div_u : n.div_s)(this.low, this.high, v.low, v.high), n.get_high(), this.unsigned) : this; if (this.isZero()) return this.unsigned ? 
m : g; if (this.unsigned) { if (v.unsigned || (v = v.toUnsigned()), v.gt(this)) return m; if (v.gt(this.shru(1))) return y; V = m } else { if (this.eq(O)) return v.eq(_) || v.eq(T) ? O : v.eq(O) ? _ : (P = this.shr(1).div(v).shl(1)).eq(g) ? v.isNegative() ? _ : T : (L = this.sub(v.mul(P)), V = P.add(L.div(v))); if (v.eq(O)) return this.unsigned ? m : g; if (this.isNegative()) return v.isNegative() ? this.neg().div(v.neg()) : this.neg().div(v).neg(); if (v.isNegative()) return this.div(v.neg()).neg(); V = g } for (L = this; L.gte(v);) { P = Math.max(1, Math.floor(L.toNumber() / v.toNumber())); for (var R = Math.ceil(Math.log(P) / Math.LN2), k = R <= 48 ? 1 : l(2, R - 48), Y = h(P), C = Y.mul(v); C.isNegative() || C.gt(L);) C = (Y = h(P -= k, this.unsigned)).mul(v); Y.isZero() && (Y = _), V = V.add(Y), L = L.sub(C) } return V }, E.div = E.divide, E.modulo = function(v) { return u(v) || (v = t(v)), n ? p((this.unsigned ? n.rem_u : n.rem_s)(this.low, this.high, v.low, v.high), n.get_high(), this.unsigned) : this.sub(this.div(v).mul(v)) }, E.mod = E.modulo, E.rem = E.modulo, E.not = function() { return p(~this.low, ~this.high, this.unsigned) }, E.and = function(v) { return u(v) || (v = t(v)), p(this.low & v.low, this.high & v.high, this.unsigned) }, E.or = function(v) { return u(v) || (v = t(v)), p(this.low | v.low, this.high | v.high, this.unsigned) }, E.xor = function(v) { return u(v) || (v = t(v)), p(this.low ^ v.low, this.high ^ v.high, this.unsigned) }, E.shiftLeft = function(v) { return u(v) && (v = v.toInt()), (v &= 63) == 0 ? this : v < 32 ? p(this.low << v, this.high << v | this.low >>> 32 - v, this.unsigned) : p(0, this.low << v - 32, this.unsigned) }, E.shl = E.shiftLeft, E.shiftRight = function(v) { return u(v) && (v = v.toInt()), (v &= 63) == 0 ? this : v < 32 ? p(this.low >>> v | this.high << 32 - v, this.high >> v, this.unsigned) : p(this.high >> v - 32, this.high >= 0 ? 0 : -1, this.unsigned) }, E.shr = E.shiftRight, E.shiftRightUnsigned = function(v) { if (u(v) && (v = v.toInt()), (v &= 63) == 0) return this; var P = this.high; return v < 32 ? p(this.low >>> v | P << 32 - v, P >>> v, this.unsigned) : p(v === 32 ? P : P >>> v - 32, 0, this.unsigned) }, E.shru = E.shiftRightUnsigned, E.shr_u = E.shiftRightUnsigned, E.toSigned = function() { return this.unsigned ? p(this.low, this.high, !1) : this }, E.toUnsigned = function() { return this.unsigned ? this : p(this.low, this.high, !0) }, E.toBytes = function(v) { return v ? this.toBytesLE() : this.toBytesBE() }, E.toBytesLE = function() { var v = this.high, P = this.low; return [255 & P, P >>> 8 & 255, P >>> 16 & 255, P >>> 24, 255 & v, v >>> 8 & 255, v >>> 16 & 255, v >>> 24] }, E.toBytesBE = function() { var v = this.high, P = this.low; return [v >>> 24, v >>> 16 & 255, v >>> 8 & 255, 255 & v, P >>> 24, P >>> 16 & 255, P >>> 8 & 255, 255 & P] }, a.fromBytes = function(v, P, L) { return L ? 
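/* fromBytes: the third argument selects byte order, decoding the 8-byte array as little-endian when truthy and big-endian otherwise. */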
a.fromBytesLE(v, P) : a.fromBytesBE(v, P) }, a.fromBytesLE = function(v, P) { return new a(v[0] | v[1] << 8 | v[2] << 16 | v[3] << 24, v[4] | v[5] << 8 | v[6] << 16 | v[7] << 24, P) }, a.fromBytesBE = function(v, P) { return new a(v[4] << 24 | v[5] << 16 | v[6] << 8 | v[7], v[0] << 24 | v[1] << 16 | v[2] << 8 | v[3], P) } }, 1446: (b, n, a) => { var u, c, f, s = a(2100), h = s.Reader, p = s.Writer, l = s.util, o = s.roots.default || (s.roots.default = {}); o.onnx = ((f = {}).Version = (u = {}, (c = Object.create(u))[u[0] = "_START_VERSION"] = 0, c[u[1] = "IR_VERSION_2017_10_10"] = 1, c[u[2] = "IR_VERSION_2017_10_30"] = 2, c[u[3] = "IR_VERSION_2017_11_3"] = 3, c[u[4] = "IR_VERSION_2019_1_22"] = 4, c[u[5] = "IR_VERSION"] = 5, c), f.AttributeProto = function() { function t(e) { if (this.floats = [], this.ints = [], this.strings = [], this.tensors = [], this.graphs = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.name = "", t.prototype.refAttrName = "", t.prototype.docString = "", t.prototype.type = 0, t.prototype.f = 0, t.prototype.i = l.Long ? l.Long.fromBits(0, 0, !1) : 0, t.prototype.s = l.newBuffer([]), t.prototype.t = null, t.prototype.g = null, t.prototype.floats = l.emptyArray, t.prototype.ints = l.emptyArray, t.prototype.strings = l.emptyArray, t.prototype.tensors = l.emptyArray, t.prototype.graphs = l.emptyArray, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.name != null && e.hasOwnProperty("name") && r.uint32(10).string(e.name), e.f != null && e.hasOwnProperty("f") && r.uint32(21).float(e.f), e.i != null && e.hasOwnProperty("i") && r.uint32(24).int64(e.i), e.s != null && e.hasOwnProperty("s") && r.uint32(34).bytes(e.s), e.t != null && e.hasOwnProperty("t") && o.onnx.TensorProto.encode(e.t, r.uint32(42).fork()).ldelim(), e.g != null && e.hasOwnProperty("g") && o.onnx.GraphProto.encode(e.g, r.uint32(50).fork()).ldelim(), e.floats != null && e.floats.length) { r.uint32(58).fork(); for (var i = 0; i < e.floats.length; ++i) r.float(e.floats[i]); r.ldelim() } if (e.ints != null && e.ints.length) { for (r.uint32(66).fork(), i = 0; i < e.ints.length; ++i) r.int64(e.ints[i]); r.ldelim() } if (e.strings != null && e.strings.length) for (i = 0; i < e.strings.length; ++i) r.uint32(74).bytes(e.strings[i]); if (e.tensors != null && e.tensors.length) for (i = 0; i < e.tensors.length; ++i) o.onnx.TensorProto.encode(e.tensors[i], r.uint32(82).fork()).ldelim(); if (e.graphs != null && e.graphs.length) for (i = 0; i < e.graphs.length; ++i) o.onnx.GraphProto.encode(e.graphs[i], r.uint32(90).fork()).ldelim(); return e.docString != null && e.hasOwnProperty("docString") && r.uint32(106).string(e.docString), e.type != null && e.hasOwnProperty("type") && r.uint32(160).int32(e.type), e.refAttrName != null && e.hasOwnProperty("refAttrName") && r.uint32(170).string(e.refAttrName), r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? 
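/* Module 1446 appears to be protobuf.js-generated bindings for the ONNX schema (onnx.AttributeProto and friends); decode() reads either to the end of the reader when no length is given, or to pos + length for a length-delimited message. */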
e.len : e.pos + r, d = new o.onnx.AttributeProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.name = e.string(); break; case 21: d.refAttrName = e.string(); break; case 13: d.docString = e.string(); break; case 20: d.type = e.int32(); break; case 2: d.f = e.float(); break; case 3: d.i = e.int64(); break; case 4: d.s = e.bytes(); break; case 5: d.t = o.onnx.TensorProto.decode(e, e.uint32()); break; case 6: d.g = o.onnx.GraphProto.decode(e, e.uint32()); break; case 7: if (d.floats && d.floats.length || (d.floats = []), (7 & g) == 2) for (var m = e.uint32() + e.pos; e.pos < m;) d.floats.push(e.float()); else d.floats.push(e.float()); break; case 8: if (d.ints && d.ints.length || (d.ints = []), (7 & g) == 2) for (m = e.uint32() + e.pos; e.pos < m;) d.ints.push(e.int64()); else d.ints.push(e.int64()); break; case 9: d.strings && d.strings.length || (d.strings = []), d.strings.push(e.bytes()); break; case 10: d.tensors && d.tensors.length || (d.tensors = []), d.tensors.push(o.onnx.TensorProto.decode(e, e.uint32())); break; case 11: d.graphs && d.graphs.length || (d.graphs = []), d.graphs.push(o.onnx.GraphProto.decode(e, e.uint32())); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.name != null && e.hasOwnProperty("name") && !l.isString(e.name)) return "name: string expected"; if (e.refAttrName != null && e.hasOwnProperty("refAttrName") && !l.isString(e.refAttrName)) return "refAttrName: string expected"; if (e.docString != null && e.hasOwnProperty("docString") && !l.isString(e.docString)) return "docString: string expected"; if (e.type != null && e.hasOwnProperty("type")) switch (e.type) { default: return "type: enum value expected"; case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10: } if (e.f != null && e.hasOwnProperty("f") && typeof e.f != "number") return "f: number expected"; if (e.i != null && e.hasOwnProperty("i") && !(l.isInteger(e.i) || e.i && l.isInteger(e.i.low) && l.isInteger(e.i.high))) return "i: integer|Long expected"; if (e.s != null && e.hasOwnProperty("s") && !(e.s && typeof e.s.length == "number" || l.isString(e.s))) return "s: buffer expected"; if (e.t != null && e.hasOwnProperty("t") && (i = o.onnx.TensorProto.verify(e.t))) return "t." + i; if (e.g != null && e.hasOwnProperty("g") && (i = o.onnx.GraphProto.verify(e.g))) return "g." 
+ i; if (e.floats != null && e.hasOwnProperty("floats")) { if (!Array.isArray(e.floats)) return "floats: array expected"; for (var r = 0; r < e.floats.length; ++r) if (typeof e.floats[r] != "number") return "floats: number[] expected" } if (e.ints != null && e.hasOwnProperty("ints")) { if (!Array.isArray(e.ints)) return "ints: array expected"; for (r = 0; r < e.ints.length; ++r) if (!(l.isInteger(e.ints[r]) || e.ints[r] && l.isInteger(e.ints[r].low) && l.isInteger(e.ints[r].high))) return "ints: integer|Long[] expected" } if (e.strings != null && e.hasOwnProperty("strings")) { if (!Array.isArray(e.strings)) return "strings: array expected"; for (r = 0; r < e.strings.length; ++r) if (!(e.strings[r] && typeof e.strings[r].length == "number" || l.isString(e.strings[r]))) return "strings: buffer[] expected" } if (e.tensors != null && e.hasOwnProperty("tensors")) { if (!Array.isArray(e.tensors)) return "tensors: array expected"; for (r = 0; r < e.tensors.length; ++r) if (i = o.onnx.TensorProto.verify(e.tensors[r])) return "tensors." + i } if (e.graphs != null && e.hasOwnProperty("graphs")) { if (!Array.isArray(e.graphs)) return "graphs: array expected"; for (r = 0; r < e.graphs.length; ++r) { var i; if (i = o.onnx.GraphProto.verify(e.graphs[r])) return "graphs." + i } } return null }, t.fromObject = function(e) { if (e instanceof o.onnx.AttributeProto) return e; var r = new o.onnx.AttributeProto; switch (e.name != null && (r.name = String(e.name)), e.refAttrName != null && (r.refAttrName = String(e.refAttrName)), e.docString != null && (r.docString = String(e.docString)), e.type) { case "UNDEFINED": case 0: r.type = 0; break; case "FLOAT": case 1: r.type = 1; break; case "INT": case 2: r.type = 2; break; case "STRING": case 3: r.type = 3; break; case "TENSOR": case 4: r.type = 4; break; case "GRAPH": case 5: r.type = 5; break; case "FLOATS": case 6: r.type = 6; break; case "INTS": case 7: r.type = 7; break; case "STRINGS": case 8: r.type = 8; break; case "TENSORS": case 9: r.type = 9; break; case "GRAPHS": case 10: r.type = 10 } if (e.f != null && (r.f = Number(e.f)), e.i != null && (l.Long ? (r.i = l.Long.fromValue(e.i)).unsigned = !1 : typeof e.i == "string" ? r.i = parseInt(e.i, 10) : typeof e.i == "number" ? r.i = e.i : typeof e.i == "object" && (r.i = new l.LongBits(e.i.low >>> 0, e.i.high >>> 0).toNumber())), e.s != null && (typeof e.s == "string" ? l.base64.decode(e.s, r.s = l.newBuffer(l.base64.length(e.s)), 0) : e.s.length && (r.s = e.s)), e.t != null) { if (typeof e.t != "object") throw TypeError(".onnx.AttributeProto.t: object expected"); r.t = o.onnx.TensorProto.fromObject(e.t) } if (e.g != null) { if (typeof e.g != "object") throw TypeError(".onnx.AttributeProto.g: object expected"); r.g = o.onnx.GraphProto.fromObject(e.g) } if (e.floats) { if (!Array.isArray(e.floats)) throw TypeError(".onnx.AttributeProto.floats: array expected"); r.floats = []; for (var i = 0; i < e.floats.length; ++i) r.floats[i] = Number(e.floats[i]) } if (e.ints) { if (!Array.isArray(e.ints)) throw TypeError(".onnx.AttributeProto.ints: array expected"); for (r.ints = [], i = 0; i < e.ints.length; ++i) l.Long ? (r.ints[i] = l.Long.fromValue(e.ints[i])).unsigned = !1 : typeof e.ints[i] == "string" ? r.ints[i] = parseInt(e.ints[i], 10) : typeof e.ints[i] == "number" ? 
r.ints[i] = e.ints[i] : typeof e.ints[i] == "object" && (r.ints[i] = new l.LongBits(e.ints[i].low >>> 0, e.ints[i].high >>> 0).toNumber()) } if (e.strings) { if (!Array.isArray(e.strings)) throw TypeError(".onnx.AttributeProto.strings: array expected"); for (r.strings = [], i = 0; i < e.strings.length; ++i) typeof e.strings[i] == "string" ? l.base64.decode(e.strings[i], r.strings[i] = l.newBuffer(l.base64.length(e.strings[i])), 0) : e.strings[i].length && (r.strings[i] = e.strings[i]) } if (e.tensors) { if (!Array.isArray(e.tensors)) throw TypeError(".onnx.AttributeProto.tensors: array expected"); for (r.tensors = [], i = 0; i < e.tensors.length; ++i) { if (typeof e.tensors[i] != "object") throw TypeError(".onnx.AttributeProto.tensors: object expected"); r.tensors[i] = o.onnx.TensorProto.fromObject(e.tensors[i]) } } if (e.graphs) { if (!Array.isArray(e.graphs)) throw TypeError(".onnx.AttributeProto.graphs: array expected"); for (r.graphs = [], i = 0; i < e.graphs.length; ++i) { if (typeof e.graphs[i] != "object") throw TypeError(".onnx.AttributeProto.graphs: object expected"); r.graphs[i] = o.onnx.GraphProto.fromObject(e.graphs[i]) } } return r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.floats = [], i.ints = [], i.strings = [], i.tensors = [], i.graphs = []), r.defaults) { if (i.name = "", i.f = 0, l.Long) { var d = new l.Long(0, 0, !1); i.i = r.longs === String ? d.toString() : r.longs === Number ? d.toNumber() : d } else i.i = r.longs === String ? "0" : 0; r.bytes === String ? i.s = "" : (i.s = [], r.bytes !== Array && (i.s = l.newBuffer(i.s))), i.t = null, i.g = null, i.docString = "", i.type = r.enums === String ? "UNDEFINED" : 0, i.refAttrName = "" } if (e.name != null && e.hasOwnProperty("name") && (i.name = e.name), e.f != null && e.hasOwnProperty("f") && (i.f = r.json && !isFinite(e.f) ? String(e.f) : e.f), e.i != null && e.hasOwnProperty("i") && (typeof e.i == "number" ? i.i = r.longs === String ? String(e.i) : e.i : i.i = r.longs === String ? l.Long.prototype.toString.call(e.i) : r.longs === Number ? new l.LongBits(e.i.low >>> 0, e.i.high >>> 0).toNumber() : e.i), e.s != null && e.hasOwnProperty("s") && (i.s = r.bytes === String ? l.base64.encode(e.s, 0, e.s.length) : r.bytes === Array ? Array.prototype.slice.call(e.s) : e.s), e.t != null && e.hasOwnProperty("t") && (i.t = o.onnx.TensorProto.toObject(e.t, r)), e.g != null && e.hasOwnProperty("g") && (i.g = o.onnx.GraphProto.toObject(e.g, r)), e.floats && e.floats.length) { i.floats = []; for (var g = 0; g < e.floats.length; ++g) i.floats[g] = r.json && !isFinite(e.floats[g]) ? String(e.floats[g]) : e.floats[g] } if (e.ints && e.ints.length) for (i.ints = [], g = 0; g < e.ints.length; ++g) typeof e.ints[g] == "number" ? i.ints[g] = r.longs === String ? String(e.ints[g]) : e.ints[g] : i.ints[g] = r.longs === String ? l.Long.prototype.toString.call(e.ints[g]) : r.longs === Number ? new l.LongBits(e.ints[g].low >>> 0, e.ints[g].high >>> 0).toNumber() : e.ints[g]; if (e.strings && e.strings.length) for (i.strings = [], g = 0; g < e.strings.length; ++g) i.strings[g] = r.bytes === String ? l.base64.encode(e.strings[g], 0, e.strings[g].length) : r.bytes === Array ? 
Array.prototype.slice.call(e.strings[g]) : e.strings[g]; if (e.tensors && e.tensors.length) for (i.tensors = [], g = 0; g < e.tensors.length; ++g) i.tensors[g] = o.onnx.TensorProto.toObject(e.tensors[g], r); if (e.graphs && e.graphs.length) for (i.graphs = [], g = 0; g < e.graphs.length; ++g) i.graphs[g] = o.onnx.GraphProto.toObject(e.graphs[g], r); return e.docString != null && e.hasOwnProperty("docString") && (i.docString = e.docString), e.type != null && e.hasOwnProperty("type") && (i.type = r.enums === String ? o.onnx.AttributeProto.AttributeType[e.type] : e.type), e.refAttrName != null && e.hasOwnProperty("refAttrName") && (i.refAttrName = e.refAttrName), i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t.AttributeType = function() { var e = {}, r = Object.create(e); return r[e[0] = "UNDEFINED"] = 0, r[e[1] = "FLOAT"] = 1, r[e[2] = "INT"] = 2, r[e[3] = "STRING"] = 3, r[e[4] = "TENSOR"] = 4, r[e[5] = "GRAPH"] = 5, r[e[6] = "FLOATS"] = 6, r[e[7] = "INTS"] = 7, r[e[8] = "STRINGS"] = 8, r[e[9] = "TENSORS"] = 9, r[e[10] = "GRAPHS"] = 10, r }(), t }(), f.ValueInfoProto = function() { function t(e) { if (e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.name = "", t.prototype.type = null, t.prototype.docString = "", t.create = function(e) { return new t(e) }, t.encode = function(e, r) { return r || (r = p.create()), e.name != null && e.hasOwnProperty("name") && r.uint32(10).string(e.name), e.type != null && e.hasOwnProperty("type") && o.onnx.TypeProto.encode(e.type, r.uint32(18).fork()).ldelim(), e.docString != null && e.hasOwnProperty("docString") && r.uint32(26).string(e.docString), r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? e.len : e.pos + r, d = new o.onnx.ValueInfoProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.name = e.string(); break; case 2: d.type = o.onnx.TypeProto.decode(e, e.uint32()); break; case 3: d.docString = e.string(); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.name != null && e.hasOwnProperty("name") && !l.isString(e.name)) return "name: string expected"; if (e.type != null && e.hasOwnProperty("type")) { var r = o.onnx.TypeProto.verify(e.type); if (r) return "type." + r } return e.docString != null && e.hasOwnProperty("docString") && !l.isString(e.docString) ? 
"docString: string expected" : null }, t.fromObject = function(e) { if (e instanceof o.onnx.ValueInfoProto) return e; var r = new o.onnx.ValueInfoProto; if (e.name != null && (r.name = String(e.name)), e.type != null) { if (typeof e.type != "object") throw TypeError(".onnx.ValueInfoProto.type: object expected"); r.type = o.onnx.TypeProto.fromObject(e.type) } return e.docString != null && (r.docString = String(e.docString)), r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; return r.defaults && (i.name = "", i.type = null, i.docString = ""), e.name != null && e.hasOwnProperty("name") && (i.name = e.name), e.type != null && e.hasOwnProperty("type") && (i.type = o.onnx.TypeProto.toObject(e.type, r)), e.docString != null && e.hasOwnProperty("docString") && (i.docString = e.docString), i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f.NodeProto = function() { function t(e) { if (this.input = [], this.output = [], this.attribute = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.input = l.emptyArray, t.prototype.output = l.emptyArray, t.prototype.name = "", t.prototype.opType = "", t.prototype.domain = "", t.prototype.attribute = l.emptyArray, t.prototype.docString = "", t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.input != null && e.input.length) for (var i = 0; i < e.input.length; ++i) r.uint32(10).string(e.input[i]); if (e.output != null && e.output.length) for (i = 0; i < e.output.length; ++i) r.uint32(18).string(e.output[i]); if (e.name != null && e.hasOwnProperty("name") && r.uint32(26).string(e.name), e.opType != null && e.hasOwnProperty("opType") && r.uint32(34).string(e.opType), e.attribute != null && e.attribute.length) for (i = 0; i < e.attribute.length; ++i) o.onnx.AttributeProto.encode(e.attribute[i], r.uint32(42).fork()).ldelim(); return e.docString != null && e.hasOwnProperty("docString") && r.uint32(50).string(e.docString), e.domain != null && e.hasOwnProperty("domain") && r.uint32(58).string(e.domain), r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? 
e.len : e.pos + r, d = new o.onnx.NodeProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.input && d.input.length || (d.input = []), d.input.push(e.string()); break; case 2: d.output && d.output.length || (d.output = []), d.output.push(e.string()); break; case 3: d.name = e.string(); break; case 4: d.opType = e.string(); break; case 7: d.domain = e.string(); break; case 5: d.attribute && d.attribute.length || (d.attribute = []), d.attribute.push(o.onnx.AttributeProto.decode(e, e.uint32())); break; case 6: d.docString = e.string(); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.input != null && e.hasOwnProperty("input")) { if (!Array.isArray(e.input)) return "input: array expected"; for (var r = 0; r < e.input.length; ++r) if (!l.isString(e.input[r])) return "input: string[] expected" } if (e.output != null && e.hasOwnProperty("output")) { if (!Array.isArray(e.output)) return "output: array expected"; for (r = 0; r < e.output.length; ++r) if (!l.isString(e.output[r])) return "output: string[] expected" } if (e.name != null && e.hasOwnProperty("name") && !l.isString(e.name)) return "name: string expected"; if (e.opType != null && e.hasOwnProperty("opType") && !l.isString(e.opType)) return "opType: string expected"; if (e.domain != null && e.hasOwnProperty("domain") && !l.isString(e.domain)) return "domain: string expected"; if (e.attribute != null && e.hasOwnProperty("attribute")) { if (!Array.isArray(e.attribute)) return "attribute: array expected"; for (r = 0; r < e.attribute.length; ++r) { var i = o.onnx.AttributeProto.verify(e.attribute[r]); if (i) return "attribute." + i } } return e.docString != null && e.hasOwnProperty("docString") && !l.isString(e.docString) ? 
"docString: string expected" : null }, t.fromObject = function(e) { if (e instanceof o.onnx.NodeProto) return e; var r = new o.onnx.NodeProto; if (e.input) { if (!Array.isArray(e.input)) throw TypeError(".onnx.NodeProto.input: array expected"); r.input = []; for (var i = 0; i < e.input.length; ++i) r.input[i] = String(e.input[i]) } if (e.output) { if (!Array.isArray(e.output)) throw TypeError(".onnx.NodeProto.output: array expected"); for (r.output = [], i = 0; i < e.output.length; ++i) r.output[i] = String(e.output[i]) } if (e.name != null && (r.name = String(e.name)), e.opType != null && (r.opType = String(e.opType)), e.domain != null && (r.domain = String(e.domain)), e.attribute) { if (!Array.isArray(e.attribute)) throw TypeError(".onnx.NodeProto.attribute: array expected"); for (r.attribute = [], i = 0; i < e.attribute.length; ++i) { if (typeof e.attribute[i] != "object") throw TypeError(".onnx.NodeProto.attribute: object expected"); r.attribute[i] = o.onnx.AttributeProto.fromObject(e.attribute[i]) } } return e.docString != null && (r.docString = String(e.docString)), r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.input = [], i.output = [], i.attribute = []), r.defaults && (i.name = "", i.opType = "", i.docString = "", i.domain = ""), e.input && e.input.length) { i.input = []; for (var d = 0; d < e.input.length; ++d) i.input[d] = e.input[d] } if (e.output && e.output.length) for (i.output = [], d = 0; d < e.output.length; ++d) i.output[d] = e.output[d]; if (e.name != null && e.hasOwnProperty("name") && (i.name = e.name), e.opType != null && e.hasOwnProperty("opType") && (i.opType = e.opType), e.attribute && e.attribute.length) for (i.attribute = [], d = 0; d < e.attribute.length; ++d) i.attribute[d] = o.onnx.AttributeProto.toObject(e.attribute[d], r); return e.docString != null && e.hasOwnProperty("docString") && (i.docString = e.docString), e.domain != null && e.hasOwnProperty("domain") && (i.domain = e.domain), i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f.ModelProto = function() { function t(e) { if (this.opsetImport = [], this.metadataProps = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.irVersion = l.Long ? l.Long.fromBits(0, 0, !1) : 0, t.prototype.opsetImport = l.emptyArray, t.prototype.producerName = "", t.prototype.producerVersion = "", t.prototype.domain = "", t.prototype.modelVersion = l.Long ? 
l.Long.fromBits(0, 0, !1) : 0, t.prototype.docString = "", t.prototype.graph = null, t.prototype.metadataProps = l.emptyArray, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.irVersion != null && e.hasOwnProperty("irVersion") && r.uint32(8).int64(e.irVersion), e.producerName != null && e.hasOwnProperty("producerName") && r.uint32(18).string(e.producerName), e.producerVersion != null && e.hasOwnProperty("producerVersion") && r.uint32(26).string(e.producerVersion), e.domain != null && e.hasOwnProperty("domain") && r.uint32(34).string(e.domain), e.modelVersion != null && e.hasOwnProperty("modelVersion") && r.uint32(40).int64(e.modelVersion), e.docString != null && e.hasOwnProperty("docString") && r.uint32(50).string(e.docString), e.graph != null && e.hasOwnProperty("graph") && o.onnx.GraphProto.encode(e.graph, r.uint32(58).fork()).ldelim(), e.opsetImport != null && e.opsetImport.length) for (var i = 0; i < e.opsetImport.length; ++i) o.onnx.OperatorSetIdProto.encode(e.opsetImport[i], r.uint32(66).fork()).ldelim(); if (e.metadataProps != null && e.metadataProps.length) for (i = 0; i < e.metadataProps.length; ++i) o.onnx.StringStringEntryProto.encode(e.metadataProps[i], r.uint32(114).fork()).ldelim(); return r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? e.len : e.pos + r, d = new o.onnx.ModelProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.irVersion = e.int64(); break; case 8: d.opsetImport && d.opsetImport.length || (d.opsetImport = []), d.opsetImport.push(o.onnx.OperatorSetIdProto.decode(e, e.uint32())); break; case 2: d.producerName = e.string(); break; case 3: d.producerVersion = e.string(); break; case 4: d.domain = e.string(); break; case 5: d.modelVersion = e.int64(); break; case 6: d.docString = e.string(); break; case 7: d.graph = o.onnx.GraphProto.decode(e, e.uint32()); break; case 14: d.metadataProps && d.metadataProps.length || (d.metadataProps = []), d.metadataProps.push(o.onnx.StringStringEntryProto.decode(e, e.uint32())); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.irVersion != null && e.hasOwnProperty("irVersion") && !(l.isInteger(e.irVersion) || e.irVersion && l.isInteger(e.irVersion.low) && l.isInteger(e.irVersion.high))) return "irVersion: integer|Long expected"; if (e.opsetImport != null && e.hasOwnProperty("opsetImport")) { if (!Array.isArray(e.opsetImport)) return "opsetImport: array expected"; for (var r = 0; r < e.opsetImport.length; ++r) if (i = o.onnx.OperatorSetIdProto.verify(e.opsetImport[r])) return "opsetImport." 
+ i } if (e.producerName != null && e.hasOwnProperty("producerName") && !l.isString(e.producerName)) return "producerName: string expected"; if (e.producerVersion != null && e.hasOwnProperty("producerVersion") && !l.isString(e.producerVersion)) return "producerVersion: string expected"; if (e.domain != null && e.hasOwnProperty("domain") && !l.isString(e.domain)) return "domain: string expected"; if (e.modelVersion != null && e.hasOwnProperty("modelVersion") && !(l.isInteger(e.modelVersion) || e.modelVersion && l.isInteger(e.modelVersion.low) && l.isInteger(e.modelVersion.high))) return "modelVersion: integer|Long expected"; if (e.docString != null && e.hasOwnProperty("docString") && !l.isString(e.docString)) return "docString: string expected"; if (e.graph != null && e.hasOwnProperty("graph") && (i = o.onnx.GraphProto.verify(e.graph))) return "graph." + i; if (e.metadataProps != null && e.hasOwnProperty("metadataProps")) { if (!Array.isArray(e.metadataProps)) return "metadataProps: array expected"; for (r = 0; r < e.metadataProps.length; ++r) { var i; if (i = o.onnx.StringStringEntryProto.verify(e.metadataProps[r])) return "metadataProps." + i } } return null }, t.fromObject = function(e) { if (e instanceof o.onnx.ModelProto) return e; var r = new o.onnx.ModelProto; if (e.irVersion != null && (l.Long ? (r.irVersion = l.Long.fromValue(e.irVersion)).unsigned = !1 : typeof e.irVersion == "string" ? r.irVersion = parseInt(e.irVersion, 10) : typeof e.irVersion == "number" ? r.irVersion = e.irVersion : typeof e.irVersion == "object" && (r.irVersion = new l.LongBits(e.irVersion.low >>> 0, e.irVersion.high >>> 0).toNumber())), e.opsetImport) { if (!Array.isArray(e.opsetImport)) throw TypeError(".onnx.ModelProto.opsetImport: array expected"); r.opsetImport = []; for (var i = 0; i < e.opsetImport.length; ++i) { if (typeof e.opsetImport[i] != "object") throw TypeError(".onnx.ModelProto.opsetImport: object expected"); r.opsetImport[i] = o.onnx.OperatorSetIdProto.fromObject(e.opsetImport[i]) } } if (e.producerName != null && (r.producerName = String(e.producerName)), e.producerVersion != null && (r.producerVersion = String(e.producerVersion)), e.domain != null && (r.domain = String(e.domain)), e.modelVersion != null && (l.Long ? (r.modelVersion = l.Long.fromValue(e.modelVersion)).unsigned = !1 : typeof e.modelVersion == "string" ? r.modelVersion = parseInt(e.modelVersion, 10) : typeof e.modelVersion == "number" ? r.modelVersion = e.modelVersion : typeof e.modelVersion == "object" && (r.modelVersion = new l.LongBits(e.modelVersion.low >>> 0, e.modelVersion.high >>> 0).toNumber())), e.docString != null && (r.docString = String(e.docString)), e.graph != null) { if (typeof e.graph != "object") throw TypeError(".onnx.ModelProto.graph: object expected"); r.graph = o.onnx.GraphProto.fromObject(e.graph) } if (e.metadataProps) { if (!Array.isArray(e.metadataProps)) throw TypeError(".onnx.ModelProto.metadataProps: array expected"); for (r.metadataProps = [], i = 0; i < e.metadataProps.length; ++i) { if (typeof e.metadataProps[i] != "object") throw TypeError(".onnx.ModelProto.metadataProps: object expected"); r.metadataProps[i] = o.onnx.StringStringEntryProto.fromObject(e.metadataProps[i]) } } return r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.opsetImport = [], i.metadataProps = []), r.defaults) { if (l.Long) { var d = new l.Long(0, 0, !1); i.irVersion = r.longs === String ? d.toString() : r.longs === Number ? 
d.toNumber() : d } else i.irVersion = r.longs === String ? "0" : 0; i.producerName = "", i.producerVersion = "", i.domain = "", l.Long ? (d = new l.Long(0, 0, !1), i.modelVersion = r.longs === String ? d.toString() : r.longs === Number ? d.toNumber() : d) : i.modelVersion = r.longs === String ? "0" : 0, i.docString = "", i.graph = null } if (e.irVersion != null && e.hasOwnProperty("irVersion") && (typeof e.irVersion == "number" ? i.irVersion = r.longs === String ? String(e.irVersion) : e.irVersion : i.irVersion = r.longs === String ? l.Long.prototype.toString.call(e.irVersion) : r.longs === Number ? new l.LongBits(e.irVersion.low >>> 0, e.irVersion.high >>> 0).toNumber() : e.irVersion), e.producerName != null && e.hasOwnProperty("producerName") && (i.producerName = e.producerName), e.producerVersion != null && e.hasOwnProperty("producerVersion") && (i.producerVersion = e.producerVersion), e.domain != null && e.hasOwnProperty("domain") && (i.domain = e.domain), e.modelVersion != null && e.hasOwnProperty("modelVersion") && (typeof e.modelVersion == "number" ? i.modelVersion = r.longs === String ? String(e.modelVersion) : e.modelVersion : i.modelVersion = r.longs === String ? l.Long.prototype.toString.call(e.modelVersion) : r.longs === Number ? new l.LongBits(e.modelVersion.low >>> 0, e.modelVersion.high >>> 0).toNumber() : e.modelVersion), e.docString != null && e.hasOwnProperty("docString") && (i.docString = e.docString), e.graph != null && e.hasOwnProperty("graph") && (i.graph = o.onnx.GraphProto.toObject(e.graph, r)), e.opsetImport && e.opsetImport.length) { i.opsetImport = []; for (var g = 0; g < e.opsetImport.length; ++g) i.opsetImport[g] = o.onnx.OperatorSetIdProto.toObject(e.opsetImport[g], r) } if (e.metadataProps && e.metadataProps.length) for (i.metadataProps = [], g = 0; g < e.metadataProps.length; ++g) i.metadataProps[g] = o.onnx.StringStringEntryProto.toObject(e.metadataProps[g], r); return i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f.StringStringEntryProto = function() { function t(e) { if (e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.key = "", t.prototype.value = "", t.create = function(e) { return new t(e) }, t.encode = function(e, r) { return r || (r = p.create()), e.key != null && e.hasOwnProperty("key") && r.uint32(10).string(e.key), e.value != null && e.hasOwnProperty("value") && r.uint32(18).string(e.value), r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? e.len : e.pos + r, d = new o.onnx.StringStringEntryProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.key = e.string(); break; case 2: d.value = e.string(); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { return typeof e != "object" || e === null ? "object expected" : e.key != null && e.hasOwnProperty("key") && !l.isString(e.key) ? "key: string expected" : e.value != null && e.hasOwnProperty("value") && !l.isString(e.value) ? 
"value: string expected" : null }, t.fromObject = function(e) { if (e instanceof o.onnx.StringStringEntryProto) return e; var r = new o.onnx.StringStringEntryProto; return e.key != null && (r.key = String(e.key)), e.value != null && (r.value = String(e.value)), r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; return r.defaults && (i.key = "", i.value = ""), e.key != null && e.hasOwnProperty("key") && (i.key = e.key), e.value != null && e.hasOwnProperty("value") && (i.value = e.value), i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f.TensorAnnotation = function() { function t(e) { if (this.quantParameterTensorNames = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.tensorName = "", t.prototype.quantParameterTensorNames = l.emptyArray, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.tensorName != null && e.hasOwnProperty("tensorName") && r.uint32(10).string(e.tensorName), e.quantParameterTensorNames != null && e.quantParameterTensorNames.length) for (var i = 0; i < e.quantParameterTensorNames.length; ++i) o.onnx.StringStringEntryProto.encode(e.quantParameterTensorNames[i], r.uint32(18).fork()).ldelim(); return r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? e.len : e.pos + r, d = new o.onnx.TensorAnnotation; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.tensorName = e.string(); break; case 2: d.quantParameterTensorNames && d.quantParameterTensorNames.length || (d.quantParameterTensorNames = []), d.quantParameterTensorNames.push(o.onnx.StringStringEntryProto.decode(e, e.uint32())); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.tensorName != null && e.hasOwnProperty("tensorName") && !l.isString(e.tensorName)) return "tensorName: string expected"; if (e.quantParameterTensorNames != null && e.hasOwnProperty("quantParameterTensorNames")) { if (!Array.isArray(e.quantParameterTensorNames)) return "quantParameterTensorNames: array expected"; for (var r = 0; r < e.quantParameterTensorNames.length; ++r) { var i = o.onnx.StringStringEntryProto.verify(e.quantParameterTensorNames[r]); if (i) return "quantParameterTensorNames." 
+ i } } return null }, t.fromObject = function(e) { if (e instanceof o.onnx.TensorAnnotation) return e; var r = new o.onnx.TensorAnnotation; if (e.tensorName != null && (r.tensorName = String(e.tensorName)), e.quantParameterTensorNames) { if (!Array.isArray(e.quantParameterTensorNames)) throw TypeError(".onnx.TensorAnnotation.quantParameterTensorNames: array expected"); r.quantParameterTensorNames = []; for (var i = 0; i < e.quantParameterTensorNames.length; ++i) { if (typeof e.quantParameterTensorNames[i] != "object") throw TypeError(".onnx.TensorAnnotation.quantParameterTensorNames: object expected"); r.quantParameterTensorNames[i] = o.onnx.StringStringEntryProto.fromObject(e.quantParameterTensorNames[i]) } } return r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.quantParameterTensorNames = []), r.defaults && (i.tensorName = ""), e.tensorName != null && e.hasOwnProperty("tensorName") && (i.tensorName = e.tensorName), e.quantParameterTensorNames && e.quantParameterTensorNames.length) { i.quantParameterTensorNames = []; for (var d = 0; d < e.quantParameterTensorNames.length; ++d) i.quantParameterTensorNames[d] = o.onnx.StringStringEntryProto.toObject(e.quantParameterTensorNames[d], r) } return i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f.GraphProto = function() { function t(e) { if (this.node = [], this.initializer = [], this.input = [], this.output = [], this.valueInfo = [], this.quantizationAnnotation = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.node = l.emptyArray, t.prototype.name = "", t.prototype.initializer = l.emptyArray, t.prototype.docString = "", t.prototype.input = l.emptyArray, t.prototype.output = l.emptyArray, t.prototype.valueInfo = l.emptyArray, t.prototype.quantizationAnnotation = l.emptyArray, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.node != null && e.node.length) for (var i = 0; i < e.node.length; ++i) o.onnx.NodeProto.encode(e.node[i], r.uint32(10).fork()).ldelim(); if (e.name != null && e.hasOwnProperty("name") && r.uint32(18).string(e.name), e.initializer != null && e.initializer.length) for (i = 0; i < e.initializer.length; ++i) o.onnx.TensorProto.encode(e.initializer[i], r.uint32(42).fork()).ldelim(); if (e.docString != null && e.hasOwnProperty("docString") && r.uint32(82).string(e.docString), e.input != null && e.input.length) for (i = 0; i < e.input.length; ++i) o.onnx.ValueInfoProto.encode(e.input[i], r.uint32(90).fork()).ldelim(); if (e.output != null && e.output.length) for (i = 0; i < e.output.length; ++i) o.onnx.ValueInfoProto.encode(e.output[i], r.uint32(98).fork()).ldelim(); if (e.valueInfo != null && e.valueInfo.length) for (i = 0; i < e.valueInfo.length; ++i) o.onnx.ValueInfoProto.encode(e.valueInfo[i], r.uint32(106).fork()).ldelim(); if (e.quantizationAnnotation != null && e.quantizationAnnotation.length) for (i = 0; i < e.quantizationAnnotation.length; ++i) o.onnx.TensorAnnotation.encode(e.quantizationAnnotation[i], r.uint32(114).fork()).ldelim(); return r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? 
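// --- Editor's note (added comment; not part of the original minified bundle) ---
// onnx.GraphProto groups the computation: node[] (NodeProto), initializer[] (TensorProto
// constants), input[]/output[]/valueInfo[] (ValueInfoProto), and quantizationAnnotation[].
// The decoder below dispatches on the wire field number (tag >>> 3), exactly as the other
// generated decode() methods do. Hedged sketch, commented out; `graph` stands for a
// decoded GraphProto instance and is a hypothetical name:
/*
for (const n of graph.node) {
  console.log(n.opType, n.input.join(","), "->", n.output.join(","));
}
*/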
e.len : e.pos + r, d = new o.onnx.GraphProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.node && d.node.length || (d.node = []), d.node.push(o.onnx.NodeProto.decode(e, e.uint32())); break; case 2: d.name = e.string(); break; case 5: d.initializer && d.initializer.length || (d.initializer = []), d.initializer.push(o.onnx.TensorProto.decode(e, e.uint32())); break; case 10: d.docString = e.string(); break; case 11: d.input && d.input.length || (d.input = []), d.input.push(o.onnx.ValueInfoProto.decode(e, e.uint32())); break; case 12: d.output && d.output.length || (d.output = []), d.output.push(o.onnx.ValueInfoProto.decode(e, e.uint32())); break; case 13: d.valueInfo && d.valueInfo.length || (d.valueInfo = []), d.valueInfo.push(o.onnx.ValueInfoProto.decode(e, e.uint32())); break; case 14: d.quantizationAnnotation && d.quantizationAnnotation.length || (d.quantizationAnnotation = []), d.quantizationAnnotation.push(o.onnx.TensorAnnotation.decode(e, e.uint32())); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.node != null && e.hasOwnProperty("node")) { if (!Array.isArray(e.node)) return "node: array expected"; for (var r = 0; r < e.node.length; ++r) if (i = o.onnx.NodeProto.verify(e.node[r])) return "node." + i } if (e.name != null && e.hasOwnProperty("name") && !l.isString(e.name)) return "name: string expected"; if (e.initializer != null && e.hasOwnProperty("initializer")) { if (!Array.isArray(e.initializer)) return "initializer: array expected"; for (r = 0; r < e.initializer.length; ++r) if (i = o.onnx.TensorProto.verify(e.initializer[r])) return "initializer." + i } if (e.docString != null && e.hasOwnProperty("docString") && !l.isString(e.docString)) return "docString: string expected"; if (e.input != null && e.hasOwnProperty("input")) { if (!Array.isArray(e.input)) return "input: array expected"; for (r = 0; r < e.input.length; ++r) if (i = o.onnx.ValueInfoProto.verify(e.input[r])) return "input." + i } if (e.output != null && e.hasOwnProperty("output")) { if (!Array.isArray(e.output)) return "output: array expected"; for (r = 0; r < e.output.length; ++r) if (i = o.onnx.ValueInfoProto.verify(e.output[r])) return "output." + i } if (e.valueInfo != null && e.hasOwnProperty("valueInfo")) { if (!Array.isArray(e.valueInfo)) return "valueInfo: array expected"; for (r = 0; r < e.valueInfo.length; ++r) if (i = o.onnx.ValueInfoProto.verify(e.valueInfo[r])) return "valueInfo." + i } if (e.quantizationAnnotation != null && e.hasOwnProperty("quantizationAnnotation")) { if (!Array.isArray(e.quantizationAnnotation)) return "quantizationAnnotation: array expected"; for (r = 0; r < e.quantizationAnnotation.length; ++r) { var i; if (i = o.onnx.TensorAnnotation.verify(e.quantizationAnnotation[r])) return "quantizationAnnotation." 
+ i } } return null }, t.fromObject = function(e) { if (e instanceof o.onnx.GraphProto) return e; var r = new o.onnx.GraphProto; if (e.node) { if (!Array.isArray(e.node)) throw TypeError(".onnx.GraphProto.node: array expected"); r.node = []; for (var i = 0; i < e.node.length; ++i) { if (typeof e.node[i] != "object") throw TypeError(".onnx.GraphProto.node: object expected"); r.node[i] = o.onnx.NodeProto.fromObject(e.node[i]) } } if (e.name != null && (r.name = String(e.name)), e.initializer) { if (!Array.isArray(e.initializer)) throw TypeError(".onnx.GraphProto.initializer: array expected"); for (r.initializer = [], i = 0; i < e.initializer.length; ++i) { if (typeof e.initializer[i] != "object") throw TypeError(".onnx.GraphProto.initializer: object expected"); r.initializer[i] = o.onnx.TensorProto.fromObject(e.initializer[i]) } } if (e.docString != null && (r.docString = String(e.docString)), e.input) { if (!Array.isArray(e.input)) throw TypeError(".onnx.GraphProto.input: array expected"); for (r.input = [], i = 0; i < e.input.length; ++i) { if (typeof e.input[i] != "object") throw TypeError(".onnx.GraphProto.input: object expected"); r.input[i] = o.onnx.ValueInfoProto.fromObject(e.input[i]) } } if (e.output) { if (!Array.isArray(e.output)) throw TypeError(".onnx.GraphProto.output: array expected"); for (r.output = [], i = 0; i < e.output.length; ++i) { if (typeof e.output[i] != "object") throw TypeError(".onnx.GraphProto.output: object expected"); r.output[i] = o.onnx.ValueInfoProto.fromObject(e.output[i]) } } if (e.valueInfo) { if (!Array.isArray(e.valueInfo)) throw TypeError(".onnx.GraphProto.valueInfo: array expected"); for (r.valueInfo = [], i = 0; i < e.valueInfo.length; ++i) { if (typeof e.valueInfo[i] != "object") throw TypeError(".onnx.GraphProto.valueInfo: object expected"); r.valueInfo[i] = o.onnx.ValueInfoProto.fromObject(e.valueInfo[i]) } } if (e.quantizationAnnotation) { if (!Array.isArray(e.quantizationAnnotation)) throw TypeError(".onnx.GraphProto.quantizationAnnotation: array expected"); for (r.quantizationAnnotation = [], i = 0; i < e.quantizationAnnotation.length; ++i) { if (typeof e.quantizationAnnotation[i] != "object") throw TypeError(".onnx.GraphProto.quantizationAnnotation: object expected"); r.quantizationAnnotation[i] = o.onnx.TensorAnnotation.fromObject(e.quantizationAnnotation[i]) } } return r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.node = [], i.initializer = [], i.input = [], i.output = [], i.valueInfo = [], i.quantizationAnnotation = []), r.defaults && (i.name = "", i.docString = ""), e.node && e.node.length) { i.node = []; for (var d = 0; d < e.node.length; ++d) i.node[d] = o.onnx.NodeProto.toObject(e.node[d], r) } if (e.name != null && e.hasOwnProperty("name") && (i.name = e.name), e.initializer && e.initializer.length) for (i.initializer = [], d = 0; d < e.initializer.length; ++d) i.initializer[d] = o.onnx.TensorProto.toObject(e.initializer[d], r); if (e.docString != null && e.hasOwnProperty("docString") && (i.docString = e.docString), e.input && e.input.length) for (i.input = [], d = 0; d < e.input.length; ++d) i.input[d] = o.onnx.ValueInfoProto.toObject(e.input[d], r); if (e.output && e.output.length) for (i.output = [], d = 0; d < e.output.length; ++d) i.output[d] = o.onnx.ValueInfoProto.toObject(e.output[d], r); if (e.valueInfo && e.valueInfo.length) for (i.valueInfo = [], d = 0; d < e.valueInfo.length; ++d) i.valueInfo[d] = o.onnx.ValueInfoProto.toObject(e.valueInfo[d], r); if 
(e.quantizationAnnotation && e.quantizationAnnotation.length) for (i.quantizationAnnotation = [], d = 0; d < e.quantizationAnnotation.length; ++d) i.quantizationAnnotation[d] = o.onnx.TensorAnnotation.toObject(e.quantizationAnnotation[d], r); return i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f.TensorProto = function() { function t(e) { if (this.dims = [], this.floatData = [], this.int32Data = [], this.stringData = [], this.int64Data = [], this.externalData = [], this.doubleData = [], this.uint64Data = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.dims = l.emptyArray, t.prototype.dataType = 0, t.prototype.segment = null, t.prototype.floatData = l.emptyArray, t.prototype.int32Data = l.emptyArray, t.prototype.stringData = l.emptyArray, t.prototype.int64Data = l.emptyArray, t.prototype.name = "", t.prototype.docString = "", t.prototype.rawData = l.newBuffer([]), t.prototype.externalData = l.emptyArray, t.prototype.dataLocation = 0, t.prototype.doubleData = l.emptyArray, t.prototype.uint64Data = l.emptyArray, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.dims != null && e.dims.length) { r.uint32(10).fork(); for (var i = 0; i < e.dims.length; ++i) r.int64(e.dims[i]); r.ldelim() } if (e.dataType != null && e.hasOwnProperty("dataType") && r.uint32(16).int32(e.dataType), e.segment != null && e.hasOwnProperty("segment") && o.onnx.TensorProto.Segment.encode(e.segment, r.uint32(26).fork()).ldelim(), e.floatData != null && e.floatData.length) { for (r.uint32(34).fork(), i = 0; i < e.floatData.length; ++i) r.float(e.floatData[i]); r.ldelim() } if (e.int32Data != null && e.int32Data.length) { for (r.uint32(42).fork(), i = 0; i < e.int32Data.length; ++i) r.int32(e.int32Data[i]); r.ldelim() } if (e.stringData != null && e.stringData.length) for (i = 0; i < e.stringData.length; ++i) r.uint32(50).bytes(e.stringData[i]); if (e.int64Data != null && e.int64Data.length) { for (r.uint32(58).fork(), i = 0; i < e.int64Data.length; ++i) r.int64(e.int64Data[i]); r.ldelim() } if (e.name != null && e.hasOwnProperty("name") && r.uint32(66).string(e.name), e.rawData != null && e.hasOwnProperty("rawData") && r.uint32(74).bytes(e.rawData), e.doubleData != null && e.doubleData.length) { for (r.uint32(82).fork(), i = 0; i < e.doubleData.length; ++i) r.double(e.doubleData[i]); r.ldelim() } if (e.uint64Data != null && e.uint64Data.length) { for (r.uint32(90).fork(), i = 0; i < e.uint64Data.length; ++i) r.uint64(e.uint64Data[i]); r.ldelim() } if (e.docString != null && e.hasOwnProperty("docString") && r.uint32(98).string(e.docString), e.externalData != null && e.externalData.length) for (i = 0; i < e.externalData.length; ++i) o.onnx.StringStringEntryProto.encode(e.externalData[i], r.uint32(106).fork()).ldelim(); return e.dataLocation != null && e.hasOwnProperty("dataLocation") && r.uint32(112).int32(e.dataLocation), r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? 
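// --- Editor's note (added comment; not part of the original minified bundle) ---
// onnx.TensorProto stores dense weights. The numeric repeated fields (dims, floatData,
// int32Data, int64Data, doubleData, uint64Data) accept both packed (wire type 2) and
// unpacked encodings in the decoder below; in practice most models ship the bytes in
// `rawData`, with `dataLocation` switching between DEFAULT (0) and EXTERNAL (1).
// Hedged sketch, commented out; `tensor` is a hypothetical decoded TensorProto and the
// example assumes tensor.dataType === 1 (FLOAT):
/*
const bytes = new Uint8Array(tensor.rawData);   // copy so the Float32Array view is aligned
const floats = bytes.length ? new Float32Array(bytes.buffer) : Float32Array.from(tensor.floatData);
*/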
e.len : e.pos + r, d = new o.onnx.TensorProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: if (d.dims && d.dims.length || (d.dims = []), (7 & g) == 2) for (var m = e.uint32() + e.pos; e.pos < m;) d.dims.push(e.int64()); else d.dims.push(e.int64()); break; case 2: d.dataType = e.int32(); break; case 3: d.segment = o.onnx.TensorProto.Segment.decode(e, e.uint32()); break; case 4: if (d.floatData && d.floatData.length || (d.floatData = []), (7 & g) == 2) for (m = e.uint32() + e.pos; e.pos < m;) d.floatData.push(e.float()); else d.floatData.push(e.float()); break; case 5: if (d.int32Data && d.int32Data.length || (d.int32Data = []), (7 & g) == 2) for (m = e.uint32() + e.pos; e.pos < m;) d.int32Data.push(e.int32()); else d.int32Data.push(e.int32()); break; case 6: d.stringData && d.stringData.length || (d.stringData = []), d.stringData.push(e.bytes()); break; case 7: if (d.int64Data && d.int64Data.length || (d.int64Data = []), (7 & g) == 2) for (m = e.uint32() + e.pos; e.pos < m;) d.int64Data.push(e.int64()); else d.int64Data.push(e.int64()); break; case 8: d.name = e.string(); break; case 12: d.docString = e.string(); break; case 9: d.rawData = e.bytes(); break; case 13: d.externalData && d.externalData.length || (d.externalData = []), d.externalData.push(o.onnx.StringStringEntryProto.decode(e, e.uint32())); break; case 14: d.dataLocation = e.int32(); break; case 10: if (d.doubleData && d.doubleData.length || (d.doubleData = []), (7 & g) == 2) for (m = e.uint32() + e.pos; e.pos < m;) d.doubleData.push(e.double()); else d.doubleData.push(e.double()); break; case 11: if (d.uint64Data && d.uint64Data.length || (d.uint64Data = []), (7 & g) == 2) for (m = e.uint32() + e.pos; e.pos < m;) d.uint64Data.push(e.uint64()); else d.uint64Data.push(e.uint64()); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.dims != null && e.hasOwnProperty("dims")) { if (!Array.isArray(e.dims)) return "dims: array expected"; for (var r = 0; r < e.dims.length; ++r) if (!(l.isInteger(e.dims[r]) || e.dims[r] && l.isInteger(e.dims[r].low) && l.isInteger(e.dims[r].high))) return "dims: integer|Long[] expected" } if (e.dataType != null && e.hasOwnProperty("dataType") && !l.isInteger(e.dataType)) return "dataType: integer expected"; if (e.segment != null && e.hasOwnProperty("segment") && (i = o.onnx.TensorProto.Segment.verify(e.segment))) return "segment." 
+ i; if (e.floatData != null && e.hasOwnProperty("floatData")) { if (!Array.isArray(e.floatData)) return "floatData: array expected"; for (r = 0; r < e.floatData.length; ++r) if (typeof e.floatData[r] != "number") return "floatData: number[] expected" } if (e.int32Data != null && e.hasOwnProperty("int32Data")) { if (!Array.isArray(e.int32Data)) return "int32Data: array expected"; for (r = 0; r < e.int32Data.length; ++r) if (!l.isInteger(e.int32Data[r])) return "int32Data: integer[] expected" } if (e.stringData != null && e.hasOwnProperty("stringData")) { if (!Array.isArray(e.stringData)) return "stringData: array expected"; for (r = 0; r < e.stringData.length; ++r) if (!(e.stringData[r] && typeof e.stringData[r].length == "number" || l.isString(e.stringData[r]))) return "stringData: buffer[] expected" } if (e.int64Data != null && e.hasOwnProperty("int64Data")) { if (!Array.isArray(e.int64Data)) return "int64Data: array expected"; for (r = 0; r < e.int64Data.length; ++r) if (!(l.isInteger(e.int64Data[r]) || e.int64Data[r] && l.isInteger(e.int64Data[r].low) && l.isInteger(e.int64Data[r].high))) return "int64Data: integer|Long[] expected" } if (e.name != null && e.hasOwnProperty("name") && !l.isString(e.name)) return "name: string expected"; if (e.docString != null && e.hasOwnProperty("docString") && !l.isString(e.docString)) return "docString: string expected"; if (e.rawData != null && e.hasOwnProperty("rawData") && !(e.rawData && typeof e.rawData.length == "number" || l.isString(e.rawData))) return "rawData: buffer expected"; if (e.externalData != null && e.hasOwnProperty("externalData")) { if (!Array.isArray(e.externalData)) return "externalData: array expected"; for (r = 0; r < e.externalData.length; ++r) { var i; if (i = o.onnx.StringStringEntryProto.verify(e.externalData[r])) return "externalData." + i } } if (e.dataLocation != null && e.hasOwnProperty("dataLocation")) switch (e.dataLocation) { default: return "dataLocation: enum value expected"; case 0: case 1: } if (e.doubleData != null && e.hasOwnProperty("doubleData")) { if (!Array.isArray(e.doubleData)) return "doubleData: array expected"; for (r = 0; r < e.doubleData.length; ++r) if (typeof e.doubleData[r] != "number") return "doubleData: number[] expected" } if (e.uint64Data != null && e.hasOwnProperty("uint64Data")) { if (!Array.isArray(e.uint64Data)) return "uint64Data: array expected"; for (r = 0; r < e.uint64Data.length; ++r) if (!(l.isInteger(e.uint64Data[r]) || e.uint64Data[r] && l.isInteger(e.uint64Data[r].low) && l.isInteger(e.uint64Data[r].high))) return "uint64Data: integer|Long[] expected" } return null }, t.fromObject = function(e) { if (e instanceof o.onnx.TensorProto) return e; var r = new o.onnx.TensorProto; if (e.dims) { if (!Array.isArray(e.dims)) throw TypeError(".onnx.TensorProto.dims: array expected"); r.dims = []; for (var i = 0; i < e.dims.length; ++i) l.Long ? (r.dims[i] = l.Long.fromValue(e.dims[i])).unsigned = !1 : typeof e.dims[i] == "string" ? r.dims[i] = parseInt(e.dims[i], 10) : typeof e.dims[i] == "number" ? 
r.dims[i] = e.dims[i] : typeof e.dims[i] == "object" && (r.dims[i] = new l.LongBits(e.dims[i].low >>> 0, e.dims[i].high >>> 0).toNumber()) } if (e.dataType != null && (r.dataType = 0 | e.dataType), e.segment != null) { if (typeof e.segment != "object") throw TypeError(".onnx.TensorProto.segment: object expected"); r.segment = o.onnx.TensorProto.Segment.fromObject(e.segment) } if (e.floatData) { if (!Array.isArray(e.floatData)) throw TypeError(".onnx.TensorProto.floatData: array expected"); for (r.floatData = [], i = 0; i < e.floatData.length; ++i) r.floatData[i] = Number(e.floatData[i]) } if (e.int32Data) { if (!Array.isArray(e.int32Data)) throw TypeError(".onnx.TensorProto.int32Data: array expected"); for (r.int32Data = [], i = 0; i < e.int32Data.length; ++i) r.int32Data[i] = 0 | e.int32Data[i] } if (e.stringData) { if (!Array.isArray(e.stringData)) throw TypeError(".onnx.TensorProto.stringData: array expected"); for (r.stringData = [], i = 0; i < e.stringData.length; ++i) typeof e.stringData[i] == "string" ? l.base64.decode(e.stringData[i], r.stringData[i] = l.newBuffer(l.base64.length(e.stringData[i])), 0) : e.stringData[i].length && (r.stringData[i] = e.stringData[i]) } if (e.int64Data) { if (!Array.isArray(e.int64Data)) throw TypeError(".onnx.TensorProto.int64Data: array expected"); for (r.int64Data = [], i = 0; i < e.int64Data.length; ++i) l.Long ? (r.int64Data[i] = l.Long.fromValue(e.int64Data[i])).unsigned = !1 : typeof e.int64Data[i] == "string" ? r.int64Data[i] = parseInt(e.int64Data[i], 10) : typeof e.int64Data[i] == "number" ? r.int64Data[i] = e.int64Data[i] : typeof e.int64Data[i] == "object" && (r.int64Data[i] = new l.LongBits(e.int64Data[i].low >>> 0, e.int64Data[i].high >>> 0).toNumber()) } if (e.name != null && (r.name = String(e.name)), e.docString != null && (r.docString = String(e.docString)), e.rawData != null && (typeof e.rawData == "string" ? l.base64.decode(e.rawData, r.rawData = l.newBuffer(l.base64.length(e.rawData)), 0) : e.rawData.length && (r.rawData = e.rawData)), e.externalData) { if (!Array.isArray(e.externalData)) throw TypeError(".onnx.TensorProto.externalData: array expected"); for (r.externalData = [], i = 0; i < e.externalData.length; ++i) { if (typeof e.externalData[i] != "object") throw TypeError(".onnx.TensorProto.externalData: object expected"); r.externalData[i] = o.onnx.StringStringEntryProto.fromObject(e.externalData[i]) } } switch (e.dataLocation) { case "DEFAULT": case 0: r.dataLocation = 0; break; case "EXTERNAL": case 1: r.dataLocation = 1 } if (e.doubleData) { if (!Array.isArray(e.doubleData)) throw TypeError(".onnx.TensorProto.doubleData: array expected"); for (r.doubleData = [], i = 0; i < e.doubleData.length; ++i) r.doubleData[i] = Number(e.doubleData[i]) } if (e.uint64Data) { if (!Array.isArray(e.uint64Data)) throw TypeError(".onnx.TensorProto.uint64Data: array expected"); for (r.uint64Data = [], i = 0; i < e.uint64Data.length; ++i) l.Long ? (r.uint64Data[i] = l.Long.fromValue(e.uint64Data[i])).unsigned = !0 : typeof e.uint64Data[i] == "string" ? r.uint64Data[i] = parseInt(e.uint64Data[i], 10) : typeof e.uint64Data[i] == "number" ? 
r.uint64Data[i] = e.uint64Data[i] : typeof e.uint64Data[i] == "object" && (r.uint64Data[i] = new l.LongBits(e.uint64Data[i].low >>> 0, e.uint64Data[i].high >>> 0).toNumber(!0)) } return r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.dims = [], i.floatData = [], i.int32Data = [], i.stringData = [], i.int64Data = [], i.doubleData = [], i.uint64Data = [], i.externalData = []), r.defaults && (i.dataType = 0, i.segment = null, i.name = "", r.bytes === String ? i.rawData = "" : (i.rawData = [], r.bytes !== Array && (i.rawData = l.newBuffer(i.rawData))), i.docString = "", i.dataLocation = r.enums === String ? "DEFAULT" : 0), e.dims && e.dims.length) { i.dims = []; for (var d = 0; d < e.dims.length; ++d) typeof e.dims[d] == "number" ? i.dims[d] = r.longs === String ? String(e.dims[d]) : e.dims[d] : i.dims[d] = r.longs === String ? l.Long.prototype.toString.call(e.dims[d]) : r.longs === Number ? new l.LongBits(e.dims[d].low >>> 0, e.dims[d].high >>> 0).toNumber() : e.dims[d] } if (e.dataType != null && e.hasOwnProperty("dataType") && (i.dataType = e.dataType), e.segment != null && e.hasOwnProperty("segment") && (i.segment = o.onnx.TensorProto.Segment.toObject(e.segment, r)), e.floatData && e.floatData.length) for (i.floatData = [], d = 0; d < e.floatData.length; ++d) i.floatData[d] = r.json && !isFinite(e.floatData[d]) ? String(e.floatData[d]) : e.floatData[d]; if (e.int32Data && e.int32Data.length) for (i.int32Data = [], d = 0; d < e.int32Data.length; ++d) i.int32Data[d] = e.int32Data[d]; if (e.stringData && e.stringData.length) for (i.stringData = [], d = 0; d < e.stringData.length; ++d) i.stringData[d] = r.bytes === String ? l.base64.encode(e.stringData[d], 0, e.stringData[d].length) : r.bytes === Array ? Array.prototype.slice.call(e.stringData[d]) : e.stringData[d]; if (e.int64Data && e.int64Data.length) for (i.int64Data = [], d = 0; d < e.int64Data.length; ++d) typeof e.int64Data[d] == "number" ? i.int64Data[d] = r.longs === String ? String(e.int64Data[d]) : e.int64Data[d] : i.int64Data[d] = r.longs === String ? l.Long.prototype.toString.call(e.int64Data[d]) : r.longs === Number ? new l.LongBits(e.int64Data[d].low >>> 0, e.int64Data[d].high >>> 0).toNumber() : e.int64Data[d]; if (e.name != null && e.hasOwnProperty("name") && (i.name = e.name), e.rawData != null && e.hasOwnProperty("rawData") && (i.rawData = r.bytes === String ? l.base64.encode(e.rawData, 0, e.rawData.length) : r.bytes === Array ? Array.prototype.slice.call(e.rawData) : e.rawData), e.doubleData && e.doubleData.length) for (i.doubleData = [], d = 0; d < e.doubleData.length; ++d) i.doubleData[d] = r.json && !isFinite(e.doubleData[d]) ? String(e.doubleData[d]) : e.doubleData[d]; if (e.uint64Data && e.uint64Data.length) for (i.uint64Data = [], d = 0; d < e.uint64Data.length; ++d) typeof e.uint64Data[d] == "number" ? i.uint64Data[d] = r.longs === String ? String(e.uint64Data[d]) : e.uint64Data[d] : i.uint64Data[d] = r.longs === String ? l.Long.prototype.toString.call(e.uint64Data[d]) : r.longs === Number ? new l.LongBits(e.uint64Data[d].low >>> 0, e.uint64Data[d].high >>> 0).toNumber(!0) : e.uint64Data[d]; if (e.docString != null && e.hasOwnProperty("docString") && (i.docString = e.docString), e.externalData && e.externalData.length) for (i.externalData = [], d = 0; d < e.externalData.length; ++d) i.externalData[d] = o.onnx.StringStringEntryProto.toObject(e.externalData[d], r); return e.dataLocation != null && e.hasOwnProperty("dataLocation") && (i.dataLocation = r.enums === String ? 
o.onnx.TensorProto.DataLocation[e.dataLocation] : e.dataLocation), i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t.DataType = function() { var e = {}, r = Object.create(e); return r[e[0] = "UNDEFINED"] = 0, r[e[1] = "FLOAT"] = 1, r[e[2] = "UINT8"] = 2, r[e[3] = "INT8"] = 3, r[e[4] = "UINT16"] = 4, r[e[5] = "INT16"] = 5, r[e[6] = "INT32"] = 6, r[e[7] = "INT64"] = 7, r[e[8] = "STRING"] = 8, r[e[9] = "BOOL"] = 9, r[e[10] = "FLOAT16"] = 10, r[e[11] = "DOUBLE"] = 11, r[e[12] = "UINT32"] = 12, r[e[13] = "UINT64"] = 13, r[e[14] = "COMPLEX64"] = 14, r[e[15] = "COMPLEX128"] = 15, r[e[16] = "BFLOAT16"] = 16, r }(), t.Segment = function() { function e(r) { if (r) for (var i = Object.keys(r), d = 0; d < i.length; ++d) r[i[d]] != null && (this[i[d]] = r[i[d]]) } return e.prototype.begin = l.Long ? l.Long.fromBits(0, 0, !1) : 0, e.prototype.end = l.Long ? l.Long.fromBits(0, 0, !1) : 0, e.create = function(r) { return new e(r) }, e.encode = function(r, i) { return i || (i = p.create()), r.begin != null && r.hasOwnProperty("begin") && i.uint32(8).int64(r.begin), r.end != null && r.hasOwnProperty("end") && i.uint32(16).int64(r.end), i }, e.encodeDelimited = function(r, i) { return this.encode(r, i).ldelim() }, e.decode = function(r, i) { r instanceof h || (r = h.create(r)); for (var d = i === void 0 ? r.len : r.pos + i, g = new o.onnx.TensorProto.Segment; r.pos < d;) { var m = r.uint32(); switch (m >>> 3) { case 1: g.begin = r.int64(); break; case 2: g.end = r.int64(); break; default: r.skipType(7 & m) } } return g }, e.decodeDelimited = function(r) { return r instanceof h || (r = new h(r)), this.decode(r, r.uint32()) }, e.verify = function(r) { return typeof r != "object" || r === null ? "object expected" : r.begin != null && r.hasOwnProperty("begin") && !(l.isInteger(r.begin) || r.begin && l.isInteger(r.begin.low) && l.isInteger(r.begin.high)) ? "begin: integer|Long expected" : r.end != null && r.hasOwnProperty("end") && !(l.isInteger(r.end) || r.end && l.isInteger(r.end.low) && l.isInteger(r.end.high)) ? "end: integer|Long expected" : null }, e.fromObject = function(r) { if (r instanceof o.onnx.TensorProto.Segment) return r; var i = new o.onnx.TensorProto.Segment; return r.begin != null && (l.Long ? (i.begin = l.Long.fromValue(r.begin)).unsigned = !1 : typeof r.begin == "string" ? i.begin = parseInt(r.begin, 10) : typeof r.begin == "number" ? i.begin = r.begin : typeof r.begin == "object" && (i.begin = new l.LongBits(r.begin.low >>> 0, r.begin.high >>> 0).toNumber())), r.end != null && (l.Long ? (i.end = l.Long.fromValue(r.end)).unsigned = !1 : typeof r.end == "string" ? i.end = parseInt(r.end, 10) : typeof r.end == "number" ? i.end = r.end : typeof r.end == "object" && (i.end = new l.LongBits(r.end.low >>> 0, r.end.high >>> 0).toNumber())), i }, e.toObject = function(r, i) { i || (i = {}); var d = {}; if (i.defaults) { if (l.Long) { var g = new l.Long(0, 0, !1); d.begin = i.longs === String ? g.toString() : i.longs === Number ? g.toNumber() : g } else d.begin = i.longs === String ? "0" : 0; l.Long ? (g = new l.Long(0, 0, !1), d.end = i.longs === String ? g.toString() : i.longs === Number ? g.toNumber() : g) : d.end = i.longs === String ? "0" : 0 } return r.begin != null && r.hasOwnProperty("begin") && (typeof r.begin == "number" ? d.begin = i.longs === String ? String(r.begin) : r.begin : d.begin = i.longs === String ? l.Long.prototype.toString.call(r.begin) : i.longs === Number ? 
new l.LongBits(r.begin.low >>> 0, r.begin.high >>> 0).toNumber() : r.begin), r.end != null && r.hasOwnProperty("end") && (typeof r.end == "number" ? d.end = i.longs === String ? String(r.end) : r.end : d.end = i.longs === String ? l.Long.prototype.toString.call(r.end) : i.longs === Number ? new l.LongBits(r.end.low >>> 0, r.end.high >>> 0).toNumber() : r.end), d }, e.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, e }(), t.DataLocation = function() { var e = {}, r = Object.create(e); return r[e[0] = "DEFAULT"] = 0, r[e[1] = "EXTERNAL"] = 1, r }(), t }(), f.TensorShapeProto = function() { function t(e) { if (this.dim = [], e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.dim = l.emptyArray, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { if (r || (r = p.create()), e.dim != null && e.dim.length) for (var i = 0; i < e.dim.length; ++i) o.onnx.TensorShapeProto.Dimension.encode(e.dim[i], r.uint32(10).fork()).ldelim(); return r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? e.len : e.pos + r, d = new o.onnx.TensorShapeProto; e.pos < i;) { var g = e.uint32(); g >>> 3 == 1 ? (d.dim && d.dim.length || (d.dim = []), d.dim.push(o.onnx.TensorShapeProto.Dimension.decode(e, e.uint32()))) : e.skipType(7 & g) } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { if (typeof e != "object" || e === null) return "object expected"; if (e.dim != null && e.hasOwnProperty("dim")) { if (!Array.isArray(e.dim)) return "dim: array expected"; for (var r = 0; r < e.dim.length; ++r) { var i = o.onnx.TensorShapeProto.Dimension.verify(e.dim[r]); if (i) return "dim." + i } } return null }, t.fromObject = function(e) { if (e instanceof o.onnx.TensorShapeProto) return e; var r = new o.onnx.TensorShapeProto; if (e.dim) { if (!Array.isArray(e.dim)) throw TypeError(".onnx.TensorShapeProto.dim: array expected"); r.dim = []; for (var i = 0; i < e.dim.length; ++i) { if (typeof e.dim[i] != "object") throw TypeError(".onnx.TensorShapeProto.dim: object expected"); r.dim[i] = o.onnx.TensorShapeProto.Dimension.fromObject(e.dim[i]) } } return r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if ((r.arrays || r.defaults) && (i.dim = []), e.dim && e.dim.length) { i.dim = []; for (var d = 0; d < e.dim.length; ++d) i.dim[d] = o.onnx.TensorShapeProto.Dimension.toObject(e.dim[d], r) } return i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t.Dimension = function() { function e(i) { if (i) for (var d = Object.keys(i), g = 0; g < d.length; ++g) i[d[g]] != null && (this[d[g]] = i[d[g]]) } var r; return e.prototype.dimValue = l.Long ? 
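// --- Editor's note (added comment; not part of the original minified bundle) ---
// TensorShapeProto.Dimension is a oneof: either a concrete dimValue (int64) or a symbolic
// dimParam string, exposed through the virtual `value` property defined just after this
// point with l.oneOfGetter / l.oneOfSetter; verify() rejects messages that set both.
// Hedged sketch, commented out; `dim` is a hypothetical Dimension instance:
/*
const size = dim.value === "dimValue" ? String(dim.dimValue) : dim.dimParam; // e.g. "3" or "batch"
*/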
l.Long.fromBits(0, 0, !1) : 0, e.prototype.dimParam = "", e.prototype.denotation = "", Object.defineProperty(e.prototype, "value", { get: l.oneOfGetter(r = ["dimValue", "dimParam"]), set: l.oneOfSetter(r) }), e.create = function(i) { return new e(i) }, e.encode = function(i, d) { return d || (d = p.create()), i.dimValue != null && i.hasOwnProperty("dimValue") && d.uint32(8).int64(i.dimValue), i.dimParam != null && i.hasOwnProperty("dimParam") && d.uint32(18).string(i.dimParam), i.denotation != null && i.hasOwnProperty("denotation") && d.uint32(26).string(i.denotation), d }, e.encodeDelimited = function(i, d) { return this.encode(i, d).ldelim() }, e.decode = function(i, d) { i instanceof h || (i = h.create(i)); for (var g = d === void 0 ? i.len : i.pos + d, m = new o.onnx.TensorShapeProto.Dimension; i.pos < g;) { var _ = i.uint32(); switch (_ >>> 3) { case 1: m.dimValue = i.int64(); break; case 2: m.dimParam = i.string(); break; case 3: m.denotation = i.string(); break; default: i.skipType(7 & _) } } return m }, e.decodeDelimited = function(i) { return i instanceof h || (i = new h(i)), this.decode(i, i.uint32()) }, e.verify = function(i) { if (typeof i != "object" || i === null) return "object expected"; var d = {}; if (i.dimValue != null && i.hasOwnProperty("dimValue") && (d.value = 1, !(l.isInteger(i.dimValue) || i.dimValue && l.isInteger(i.dimValue.low) && l.isInteger(i.dimValue.high)))) return "dimValue: integer|Long expected"; if (i.dimParam != null && i.hasOwnProperty("dimParam")) { if (d.value === 1) return "value: multiple values"; if (d.value = 1, !l.isString(i.dimParam)) return "dimParam: string expected" } return i.denotation != null && i.hasOwnProperty("denotation") && !l.isString(i.denotation) ? "denotation: string expected" : null }, e.fromObject = function(i) { if (i instanceof o.onnx.TensorShapeProto.Dimension) return i; var d = new o.onnx.TensorShapeProto.Dimension; return i.dimValue != null && (l.Long ? (d.dimValue = l.Long.fromValue(i.dimValue)).unsigned = !1 : typeof i.dimValue == "string" ? d.dimValue = parseInt(i.dimValue, 10) : typeof i.dimValue == "number" ? d.dimValue = i.dimValue : typeof i.dimValue == "object" && (d.dimValue = new l.LongBits(i.dimValue.low >>> 0, i.dimValue.high >>> 0).toNumber())), i.dimParam != null && (d.dimParam = String(i.dimParam)), i.denotation != null && (d.denotation = String(i.denotation)), d }, e.toObject = function(i, d) { d || (d = {}); var g = {}; return d.defaults && (g.denotation = ""), i.dimValue != null && i.hasOwnProperty("dimValue") && (typeof i.dimValue == "number" ? g.dimValue = d.longs === String ? String(i.dimValue) : i.dimValue : g.dimValue = d.longs === String ? l.Long.prototype.toString.call(i.dimValue) : d.longs === Number ? 
new l.LongBits(i.dimValue.low >>> 0, i.dimValue.high >>> 0).toNumber() : i.dimValue, d.oneofs && (g.value = "dimValue")), i.dimParam != null && i.hasOwnProperty("dimParam") && (g.dimParam = i.dimParam, d.oneofs && (g.value = "dimParam")), i.denotation != null && i.hasOwnProperty("denotation") && (g.denotation = i.denotation), g }, e.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, e }(), t }(), f.TypeProto = function() { function t(r) { if (r) for (var i = Object.keys(r), d = 0; d < i.length; ++d) r[i[d]] != null && (this[i[d]] = r[i[d]]) } var e; return t.prototype.tensorType = null, t.prototype.denotation = "", Object.defineProperty(t.prototype, "value", { get: l.oneOfGetter(e = ["tensorType"]), set: l.oneOfSetter(e) }), t.create = function(r) { return new t(r) }, t.encode = function(r, i) { return i || (i = p.create()), r.tensorType != null && r.hasOwnProperty("tensorType") && o.onnx.TypeProto.Tensor.encode(r.tensorType, i.uint32(10).fork()).ldelim(), r.denotation != null && r.hasOwnProperty("denotation") && i.uint32(50).string(r.denotation), i }, t.encodeDelimited = function(r, i) { return this.encode(r, i).ldelim() }, t.decode = function(r, i) { r instanceof h || (r = h.create(r)); for (var d = i === void 0 ? r.len : r.pos + i, g = new o.onnx.TypeProto; r.pos < d;) { var m = r.uint32(); switch (m >>> 3) { case 1: g.tensorType = o.onnx.TypeProto.Tensor.decode(r, r.uint32()); break; case 6: g.denotation = r.string(); break; default: r.skipType(7 & m) } } return g }, t.decodeDelimited = function(r) { return r instanceof h || (r = new h(r)), this.decode(r, r.uint32()) }, t.verify = function(r) { if (typeof r != "object" || r === null) return "object expected"; if (r.tensorType != null && r.hasOwnProperty("tensorType")) { var i = o.onnx.TypeProto.Tensor.verify(r.tensorType); if (i) return "tensorType." + i } return r.denotation != null && r.hasOwnProperty("denotation") && !l.isString(r.denotation) ? "denotation: string expected" : null }, t.fromObject = function(r) { if (r instanceof o.onnx.TypeProto) return r; var i = new o.onnx.TypeProto; if (r.tensorType != null) { if (typeof r.tensorType != "object") throw TypeError(".onnx.TypeProto.tensorType: object expected"); i.tensorType = o.onnx.TypeProto.Tensor.fromObject(r.tensorType) } return r.denotation != null && (i.denotation = String(r.denotation)), i }, t.toObject = function(r, i) { i || (i = {}); var d = {}; return i.defaults && (d.denotation = ""), r.tensorType != null && r.hasOwnProperty("tensorType") && (d.tensorType = o.onnx.TypeProto.Tensor.toObject(r.tensorType, i), i.oneofs && (d.value = "tensorType")), r.denotation != null && r.hasOwnProperty("denotation") && (d.denotation = r.denotation), d }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t.Tensor = function() { function r(i) { if (i) for (var d = Object.keys(i), g = 0; g < d.length; ++g) i[d[g]] != null && (this[d[g]] = i[d[g]]) } return r.prototype.elemType = 0, r.prototype.shape = null, r.create = function(i) { return new r(i) }, r.encode = function(i, d) { return d || (d = p.create()), i.elemType != null && i.hasOwnProperty("elemType") && d.uint32(8).int32(i.elemType), i.shape != null && i.hasOwnProperty("shape") && o.onnx.TensorShapeProto.encode(i.shape, d.uint32(18).fork()).ldelim(), d }, r.encodeDelimited = function(i, d) { return this.encode(i, d).ldelim() }, r.decode = function(i, d) { i instanceof h || (i = h.create(i)); for (var g = d === void 0 ? 
i.len : i.pos + d, m = new o.onnx.TypeProto.Tensor; i.pos < g;) { var _ = i.uint32(); switch (_ >>> 3) { case 1: m.elemType = i.int32(); break; case 2: m.shape = o.onnx.TensorShapeProto.decode(i, i.uint32()); break; default: i.skipType(7 & _) } } return m }, r.decodeDelimited = function(i) { return i instanceof h || (i = new h(i)), this.decode(i, i.uint32()) }, r.verify = function(i) { if (typeof i != "object" || i === null) return "object expected"; if (i.elemType != null && i.hasOwnProperty("elemType") && !l.isInteger(i.elemType)) return "elemType: integer expected"; if (i.shape != null && i.hasOwnProperty("shape")) { var d = o.onnx.TensorShapeProto.verify(i.shape); if (d) return "shape." + d } return null }, r.fromObject = function(i) { if (i instanceof o.onnx.TypeProto.Tensor) return i; var d = new o.onnx.TypeProto.Tensor; if (i.elemType != null && (d.elemType = 0 | i.elemType), i.shape != null) { if (typeof i.shape != "object") throw TypeError(".onnx.TypeProto.Tensor.shape: object expected"); d.shape = o.onnx.TensorShapeProto.fromObject(i.shape) } return d }, r.toObject = function(i, d) { d || (d = {}); var g = {}; return d.defaults && (g.elemType = 0, g.shape = null), i.elemType != null && i.hasOwnProperty("elemType") && (g.elemType = i.elemType), i.shape != null && i.hasOwnProperty("shape") && (g.shape = o.onnx.TensorShapeProto.toObject(i.shape, d)), g }, r.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, r }(), t }(), f.OperatorSetIdProto = function() { function t(e) { if (e) for (var r = Object.keys(e), i = 0; i < r.length; ++i) e[r[i]] != null && (this[r[i]] = e[r[i]]) } return t.prototype.domain = "", t.prototype.version = l.Long ? l.Long.fromBits(0, 0, !1) : 0, t.create = function(e) { return new t(e) }, t.encode = function(e, r) { return r || (r = p.create()), e.domain != null && e.hasOwnProperty("domain") && r.uint32(10).string(e.domain), e.version != null && e.hasOwnProperty("version") && r.uint32(16).int64(e.version), r }, t.encodeDelimited = function(e, r) { return this.encode(e, r).ldelim() }, t.decode = function(e, r) { e instanceof h || (e = h.create(e)); for (var i = r === void 0 ? e.len : e.pos + r, d = new o.onnx.OperatorSetIdProto; e.pos < i;) { var g = e.uint32(); switch (g >>> 3) { case 1: d.domain = e.string(); break; case 2: d.version = e.int64(); break; default: e.skipType(7 & g) } } return d }, t.decodeDelimited = function(e) { return e instanceof h || (e = new h(e)), this.decode(e, e.uint32()) }, t.verify = function(e) { return typeof e != "object" || e === null ? "object expected" : e.domain != null && e.hasOwnProperty("domain") && !l.isString(e.domain) ? "domain: string expected" : e.version != null && e.hasOwnProperty("version") && !(l.isInteger(e.version) || e.version && l.isInteger(e.version.low) && l.isInteger(e.version.high)) ? "version: integer|Long expected" : null }, t.fromObject = function(e) { if (e instanceof o.onnx.OperatorSetIdProto) return e; var r = new o.onnx.OperatorSetIdProto; return e.domain != null && (r.domain = String(e.domain)), e.version != null && (l.Long ? (r.version = l.Long.fromValue(e.version)).unsigned = !1 : typeof e.version == "string" ? r.version = parseInt(e.version, 10) : typeof e.version == "number" ? 
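/* onnx.OperatorSetIdProto follows: domain (field 1, string) and version (field 2, int64).
   int64 handling degrades gracefully: values become Long instances when a Long library is
   present (l.Long), otherwise plain numbers, with toObject honoring the longs: String|Number
   conversion option. */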
r.version = e.version : typeof e.version == "object" && (r.version = new l.LongBits(e.version.low >>> 0, e.version.high >>> 0).toNumber())), r }, t.toObject = function(e, r) { r || (r = {}); var i = {}; if (r.defaults) if (i.domain = "", l.Long) { var d = new l.Long(0, 0, !1); i.version = r.longs === String ? d.toString() : r.longs === Number ? d.toNumber() : d } else i.version = r.longs === String ? "0" : 0; return e.domain != null && e.hasOwnProperty("domain") && (i.domain = e.domain), e.version != null && e.hasOwnProperty("version") && (typeof e.version == "number" ? i.version = r.longs === String ? String(e.version) : e.version : i.version = r.longs === String ? l.Long.prototype.toString.call(e.version) : r.longs === Number ? new l.LongBits(e.version.low >>> 0, e.version.high >>> 0).toNumber() : e.version), i }, t.prototype.toJSON = function() { return this.constructor.toObject(this, s.util.toJSONOptions) }, t }(), f), b.exports = o }, 2100: (b, n, a) => { b.exports = a(9482) }, 9482: (b, n, a) => { var u = n; function c() { u.util._configure(), u.Writer._configure(u.BufferWriter), u.Reader._configure(u.BufferReader) } u.build = "minimal", u.Writer = a(1173), u.BufferWriter = a(3155), u.Reader = a(1408), u.BufferReader = a(593), u.util = a(9693), u.rpc = a(5994), u.roots = a(5054), u.configure = c, c() }, 1408: (b, n, a) => { b.exports = p; var u, c = a(9693), f = c.LongBits, s = c.utf8; function h(d, g) { return RangeError("index out of range: " + d.pos + " + " + (g || 1) + " > " + d.len) } function p(d) { this.buf = d, this.pos = 0, this.len = d.length } var l, o = typeof Uint8Array < "u" ? function(d) { if (d instanceof Uint8Array || Array.isArray(d)) return new p(d); throw Error("illegal buffer") } : function(d) { if (Array.isArray(d)) return new p(d); throw Error("illegal buffer") }, t = function() { return c.Buffer ? function(d) { return (p.create = function(g) { return c.Buffer.isBuffer(g) ? 
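/* From here the bundle inlines protobuf.js's "minimal" runtime: module 9482 is the index
   (Writer/BufferWriter/Reader/BufferReader/util/rpc/roots plus configure), and module 1408
   is Reader, which walks a byte buffer with pos/len and throws a RangeError on overrun. */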
new u(g) : o(g) })(d) } : o }; function e() { var d = new f(0, 0), g = 0; if (!(this.len - this.pos > 4)) { for (; g < 3; ++g) { if (this.pos >= this.len) throw h(this); if (d.lo = (d.lo | (127 & this.buf[this.pos]) << 7 * g) >>> 0, this.buf[this.pos++] < 128) return d } return d.lo = (d.lo | (127 & this.buf[this.pos++]) << 7 * g) >>> 0, d } for (; g < 4; ++g) if (d.lo = (d.lo | (127 & this.buf[this.pos]) << 7 * g) >>> 0, this.buf[this.pos++] < 128) return d; if (d.lo = (d.lo | (127 & this.buf[this.pos]) << 28) >>> 0, d.hi = (d.hi | (127 & this.buf[this.pos]) >> 4) >>> 0, this.buf[this.pos++] < 128) return d; if (g = 0, this.len - this.pos > 4) { for (; g < 5; ++g) if (d.hi = (d.hi | (127 & this.buf[this.pos]) << 7 * g + 3) >>> 0, this.buf[this.pos++] < 128) return d } else for (; g < 5; ++g) { if (this.pos >= this.len) throw h(this); if (d.hi = (d.hi | (127 & this.buf[this.pos]) << 7 * g + 3) >>> 0, this.buf[this.pos++] < 128) return d } throw Error("invalid varint encoding") } function r(d, g) { return (d[g - 4] | d[g - 3] << 8 | d[g - 2] << 16 | d[g - 1] << 24) >>> 0 } function i() { if (this.pos + 8 > this.len) throw h(this, 8); return new f(r(this.buf, this.pos += 4), r(this.buf, this.pos += 4)) } p.create = t(), p.prototype._slice = c.Array.prototype.subarray || c.Array.prototype.slice, p.prototype.uint32 = (l = 4294967295, function() { if (l = (127 & this.buf[this.pos]) >>> 0, this.buf[this.pos++] < 128 || (l = (l | (127 & this.buf[this.pos]) << 7) >>> 0, this.buf[this.pos++] < 128) || (l = (l | (127 & this.buf[this.pos]) << 14) >>> 0, this.buf[this.pos++] < 128) || (l = (l | (127 & this.buf[this.pos]) << 21) >>> 0, this.buf[this.pos++] < 128) || (l = (l | (15 & this.buf[this.pos]) << 28) >>> 0, this.buf[this.pos++] < 128)) return l; if ((this.pos += 5) > this.len) throw this.pos = this.len, h(this, 10); return l }), p.prototype.int32 = function() { return 0 | this.uint32() }, p.prototype.sint32 = function() { var d = this.uint32(); return d >>> 1 ^ -(1 & d) | 0 }, p.prototype.bool = function() { return this.uint32() !== 0 }, p.prototype.fixed32 = function() { if (this.pos + 4 > this.len) throw h(this, 4); return r(this.buf, this.pos += 4) }, p.prototype.sfixed32 = function() { if (this.pos + 4 > this.len) throw h(this, 4); return 0 | r(this.buf, this.pos += 4) }, p.prototype.float = function() { if (this.pos + 4 > this.len) throw h(this, 4); var d = c.float.readFloatLE(this.buf, this.pos); return this.pos += 4, d }, p.prototype.double = function() { if (this.pos + 8 > this.len) throw h(this, 4); var d = c.float.readDoubleLE(this.buf, this.pos); return this.pos += 8, d }, p.prototype.bytes = function() { var d = this.uint32(), g = this.pos, m = this.pos + d; if (m > this.len) throw h(this, d); return this.pos += d, Array.isArray(this.buf) ? this.buf.slice(g, m) : g === m ? 
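/* Reader internals: the helper above (function e) decodes a 64-bit varint into LongBits,
   7 bits per byte; function r reads a 32-bit little-endian word; uint32 is an unrolled
   varint loop; sint32 reverses zigzag encoding via (n >>> 1) ^ -(n & 1). Note that double()
   checks for 8 available bytes but passes 4 to the out-of-range helper, which only affects
   the wording of the thrown error. */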
new this.buf.constructor(0) : this._slice.call(this.buf, g, m) }, p.prototype.string = function() { var d = this.bytes(); return s.read(d, 0, d.length) }, p.prototype.skip = function(d) { if (typeof d == "number") { if (this.pos + d > this.len) throw h(this, d); this.pos += d } else do if (this.pos >= this.len) throw h(this); while (128 & this.buf[this.pos++]); return this }, p.prototype.skipType = function(d) { switch (d) { case 0: this.skip(); break; case 1: this.skip(8); break; case 2: this.skip(this.uint32()); break; case 3: for (; (d = 7 & this.uint32()) != 4;) this.skipType(d); break; case 5: this.skip(4); break; default: throw Error("invalid wire type " + d + " at offset " + this.pos) } return this }, p._configure = function(d) { u = d, p.create = t(), u._configure(); var g = c.Long ? "toLong" : "toNumber"; c.merge(p.prototype, { int64: function() { return e.call(this)[g](!1) }, uint64: function() { return e.call(this)[g](!0) }, sint64: function() { return e.call(this).zzDecode()[g](!1) }, fixed64: function() { return i.call(this)[g](!0) }, sfixed64: function() { return i.call(this)[g](!1) } }) } }, 593: (b, n, a) => { b.exports = f; var u = a(1408); (f.prototype = Object.create(u.prototype)).constructor = f; var c = a(9693); function f(s) { u.call(this, s) } f._configure = function() { c.Buffer && (f.prototype._slice = c.Buffer.prototype.slice) }, f.prototype.string = function() { var s = this.uint32(); return this.buf.utf8Slice ? this.buf.utf8Slice(this.pos, this.pos = Math.min(this.pos + s, this.len)) : this.buf.toString("utf-8", this.pos, this.pos = Math.min(this.pos + s, this.len)) }, f._configure() }, 5054: b => { b.exports = {} }, 5994: (b, n, a) => { n.Service = a(7948) }, 7948: (b, n, a) => { b.exports = c; var u = a(9693); function c(f, s, h) { if (typeof f != "function") throw TypeError("rpcImpl must be a function"); u.EventEmitter.call(this), this.rpcImpl = f, this.requestDelimited = !!s, this.responseDelimited = !!h }(c.prototype = Object.create(u.EventEmitter.prototype)).constructor = c, c.prototype.rpcCall = function f(s, h, p, l, o) { if (!l) throw TypeError("request must be specified"); var t = this; if (!o) return u.asPromise(f, t, s, h, p, l); if (t.rpcImpl) try { return t.rpcImpl(s, h[t.requestDelimited ? "encodeDelimited" : "encode"](l).finish(), function(e, r) { if (e) return t.emit("error", e, s), o(e); if (r !== null) { if (!(r instanceof p)) try { r = p[t.responseDelimited ? 
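/* Module 593 (BufferReader) specializes Reader for Node Buffers (utf8Slice fast path for
   string()); 5054 is the shared roots object; 5994/7948 implement the generic rpc.Service,
   whose rpcCall encodes the request, hands it to the user-supplied rpcImpl, decodes the
   response, and re-emits it as "data"/"error" events. */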
"decodeDelimited" : "decode"](r) } catch (i) { return t.emit("error", i, s), o(i) } return t.emit("data", r, s), o(null, r) } t.end(!0) }) } catch (e) { return t.emit("error", e, s), void setTimeout(function() { o(e) }, 0) } else setTimeout(function() { o(Error("already ended")) }, 0) }, c.prototype.end = function(f) { return this.rpcImpl && (f || this.rpcImpl(null, null, null), this.rpcImpl = null, this.emit("end").off()), this } }, 1945: (b, n, a) => { b.exports = c; var u = a(9693); function c(p, l) { this.lo = p >>> 0, this.hi = l >>> 0 } var f = c.zero = new c(0, 0); f.toNumber = function() { return 0 }, f.zzEncode = f.zzDecode = function() { return this }, f.length = function() { return 1 }; var s = c.zeroHash = "\0\0\0\0\0\0\0\0"; c.fromNumber = function(p) { if (p === 0) return f; var l = p < 0; l && (p = -p); var o = p >>> 0, t = (p - o) / 4294967296 >>> 0; return l && (t = ~t >>> 0, o = ~o >>> 0, ++o > 4294967295 && (o = 0, ++t > 4294967295 && (t = 0))), new c(o, t) }, c.from = function(p) { if (typeof p == "number") return c.fromNumber(p); if (u.isString(p)) { if (!u.Long) return c.fromNumber(parseInt(p, 10)); p = u.Long.fromString(p) } return p.low || p.high ? new c(p.low >>> 0, p.high >>> 0) : f }, c.prototype.toNumber = function(p) { if (!p && this.hi >>> 31) { var l = 1 + ~this.lo >>> 0, o = ~this.hi >>> 0; return l || (o = o + 1 >>> 0), -(l + 4294967296 * o) } return this.lo + 4294967296 * this.hi }, c.prototype.toLong = function(p) { return u.Long ? new u.Long(0 | this.lo, 0 | this.hi, !!p) : { low: 0 | this.lo, high: 0 | this.hi, unsigned: !!p } }; var h = String.prototype.charCodeAt; c.fromHash = function(p) { return p === s ? f : new c((h.call(p, 0) | h.call(p, 1) << 8 | h.call(p, 2) << 16 | h.call(p, 3) << 24) >>> 0, (h.call(p, 4) | h.call(p, 5) << 8 | h.call(p, 6) << 16 | h.call(p, 7) << 24) >>> 0) }, c.prototype.toHash = function() { return String.fromCharCode(255 & this.lo, this.lo >>> 8 & 255, this.lo >>> 16 & 255, this.lo >>> 24, 255 & this.hi, this.hi >>> 8 & 255, this.hi >>> 16 & 255, this.hi >>> 24) }, c.prototype.zzEncode = function() { var p = this.hi >> 31; return this.hi = ((this.hi << 1 | this.lo >>> 31) ^ p) >>> 0, this.lo = (this.lo << 1 ^ p) >>> 0, this }, c.prototype.zzDecode = function() { var p = -(1 & this.lo); return this.lo = ((this.lo >>> 1 | this.hi << 31) ^ p) >>> 0, this.hi = (this.hi >>> 1 ^ p) >>> 0, this }, c.prototype.length = function() { var p = this.lo, l = (this.lo >>> 28 | this.hi << 4) >>> 0, o = this.hi >>> 24; return o === 0 ? l === 0 ? p < 16384 ? p < 128 ? 1 : 2 : p < 2097152 ? 3 : 4 : l < 16384 ? l < 128 ? 5 : 6 : l < 2097152 ? 7 : 8 : o < 128 ? 9 : 10 } }, 9693: function(b, n, a) { var u = n; function c(s, h, p) { for (var l = Object.keys(h), o = 0; o < l.length; ++o) s[l[o]] !== void 0 && p || (s[l[o]] = h[l[o]]); return s } function f(s) { function h(p, l) { if (!(this instanceof h)) return new h(p, l); Object.defineProperty(this, "message", { get: function() { return p } }), Error.captureStackTrace ? 
Error.captureStackTrace(this, h) : Object.defineProperty(this, "stack", { value: new Error().stack || "" }), l && c(this, l) } return (h.prototype = Object.create(Error.prototype)).constructor = h, Object.defineProperty(h.prototype, "name", { get: function() { return s } }), h.prototype.toString = function() { return this.name + ": " + this.message }, h } u.asPromise = a(4537), u.base64 = a(7419), u.EventEmitter = a(9211), u.float = a(945), u.inquire = a(7199), u.utf8 = a(4997), u.pool = a(6662), u.LongBits = a(1945), u.isNode = !!(a.g !== void 0 && a.g && a.g.process && a.g.process.versions && a.g.process.versions.node), u.global = u.isNode && a.g || typeof window < "u" && window || typeof self < "u" && self || this, u.emptyArray = Object.freeze ? Object.freeze([]) : [], u.emptyObject = Object.freeze ? Object.freeze({}) : {}, u.isInteger = Number.isInteger || function(s) { return typeof s == "number" && isFinite(s) && Math.floor(s) === s }, u.isString = function(s) { return typeof s == "string" || s instanceof String }, u.isObject = function(s) { return s && typeof s == "object" }, u.isset = u.isSet = function(s, h) { var p = s[h]; return !(p == null || !s.hasOwnProperty(h)) && (typeof p != "object" || (Array.isArray(p) ? p.length : Object.keys(p).length) > 0) }, u.Buffer = function() { try { var s = u.inquire("buffer").Buffer; return s.prototype.utf8Write ? s : null } catch { return null } }(), u._Buffer_from = null, u._Buffer_allocUnsafe = null, u.newBuffer = function(s) { return typeof s == "number" ? u.Buffer ? u._Buffer_allocUnsafe(s) : new u.Array(s) : u.Buffer ? u._Buffer_from(s) : typeof Uint8Array > "u" ? s : new Uint8Array(s) }, u.Array = typeof Uint8Array < "u" ? Uint8Array : Array, u.Long = u.global.dcodeIO && u.global.dcodeIO.Long || u.global.Long || u.inquire("long"), u.key2Re = /^true|false|0|1$/, u.key32Re = /^-?(?:0|[1-9][0-9]*)$/, u.key64Re = /^(?:[\\x00-\\xff]{8}|-?(?:0|[1-9][0-9]*))$/, u.longToHash = function(s) { return s ? u.LongBits.from(s).toHash() : u.LongBits.zeroHash }, u.longFromHash = function(s, h) { var p = u.LongBits.fromHash(s); return u.Long ? u.Long.fromBits(p.lo, p.hi, h) : p.toNumber(!!h) }, u.merge = c, u.lcFirst = function(s) { return s.charAt(0).toLowerCase() + s.substring(1) }, u.newError = f, u.ProtocolError = f("ProtocolError"), u.oneOfGetter = function(s) { for (var h = {}, p = 0; p < s.length; ++p) h[s[p]] = 1; return function() { for (var l = Object.keys(this), o = l.length - 1; o > -1; --o) if (h[l[o]] === 1 && this[l[o]] !== void 0 && this[l[o]] !== null) return l[o] } }, u.oneOfSetter = function(s) { return function(h) { for (var p = 0; p < s.length; ++p) s[p] !== h && delete this[s[p]] } }, u.toJSONOptions = { longs: String, enums: String, bytes: String, json: !0 }, u._configure = function() { var s = u.Buffer; s ? (u._Buffer_from = s.from !== Uint8Array.from && s.from || function(h, p) { return new s(h, p) }, u._Buffer_allocUnsafe = s.allocUnsafe || function(h) { return new s(h) }) : u._Buffer_from = u._Buffer_allocUnsafe = null } }, 1173: (b, n, a) => { b.exports = t; var u, c = a(9693), f = c.LongBits, s = c.base64, h = c.utf8; function p(_, y, T) { this.fn = _, this.len = y, this.next = void 0, this.val = T } function l() {} function o(_) { this.head = _.head, this.tail = _.tail, this.len = _.len, this.next = _.states } function t() { this.len = 0, this.head = new p(l, 0, 0), this.tail = this.head, this.states = null } var e = function() { return c.Buffer ? 
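/* Module 9693 is protobuf.js util: environment probing (Buffer via inquire("buffer"),
   Long via the global dcodeIO.Long, global Long, or inquire("long"), isNode/global
   detection), small helpers (merge, oneOfGetter/oneOfSetter for oneof fields,
   longToHash/longFromHash), and _configure wiring Buffer.from/allocUnsafe. Module 1173
   (Writer) starts here: writes are queued as a linked list of ops (fn, len, val) and
   materialized later by finish(). */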
function() { return (t.create = function() { return new u })() } : function() { return new t } }; function r(_, y, T) { y[T] = 255 & _ } function i(_, y) { this.len = _, this.next = void 0, this.val = y } function d(_, y, T) { for (; _.hi;) y[T++] = 127 & _.lo | 128, _.lo = (_.lo >>> 7 | _.hi << 25) >>> 0, _.hi >>>= 7; for (; _.lo > 127;) y[T++] = 127 & _.lo | 128, _.lo = _.lo >>> 7; y[T++] = _.lo } function g(_, y, T) { y[T] = 255 & _, y[T + 1] = _ >>> 8 & 255, y[T + 2] = _ >>> 16 & 255, y[T + 3] = _ >>> 24 } t.create = e(), t.alloc = function(_) { return new c.Array(_) }, c.Array !== Array && (t.alloc = c.pool(t.alloc, c.Array.prototype.subarray)), t.prototype._push = function(_, y, T) { return this.tail = this.tail.next = new p(_, y, T), this.len += y, this }, i.prototype = Object.create(p.prototype), i.prototype.fn = function(_, y, T) { for (; _ > 127;) y[T++] = 127 & _ | 128, _ >>>= 7; y[T] = _ }, t.prototype.uint32 = function(_) { return this.len += (this.tail = this.tail.next = new i((_ >>>= 0) < 128 ? 1 : _ < 16384 ? 2 : _ < 2097152 ? 3 : _ < 268435456 ? 4 : 5, _)).len, this }, t.prototype.int32 = function(_) { return _ < 0 ? this._push(d, 10, f.fromNumber(_)) : this.uint32(_) }, t.prototype.sint32 = function(_) { return this.uint32((_ << 1 ^ _ >> 31) >>> 0) }, t.prototype.uint64 = function(_) { var y = f.from(_); return this._push(d, y.length(), y) }, t.prototype.int64 = t.prototype.uint64, t.prototype.sint64 = function(_) { var y = f.from(_).zzEncode(); return this._push(d, y.length(), y) }, t.prototype.bool = function(_) { return this._push(r, 1, _ ? 1 : 0) }, t.prototype.fixed32 = function(_) { return this._push(g, 4, _ >>> 0) }, t.prototype.sfixed32 = t.prototype.fixed32, t.prototype.fixed64 = function(_) { var y = f.from(_); return this._push(g, 4, y.lo)._push(g, 4, y.hi) }, t.prototype.sfixed64 = t.prototype.fixed64, t.prototype.float = function(_) { return this._push(c.float.writeFloatLE, 4, _) }, t.prototype.double = function(_) { return this._push(c.float.writeDoubleLE, 8, _) }; var m = c.Array.prototype.set ? function(_, y, T) { y.set(_, T) } : function(_, y, T) { for (var w = 0; w < _.length; ++w) y[T + w] = _[w] }; t.prototype.bytes = function(_) { var y = _.length >>> 0; if (!y) return this._push(r, 1, 0); if (c.isString(_)) { var T = t.alloc(y = s.length(_)); s.decode(_, T, 0), _ = T } return this.uint32(y)._push(m, y, _) }, t.prototype.string = function(_) { var y = h.length(_); return y ? this.uint32(y)._push(h.write, y, _) : this._push(r, 1, 0) }, t.prototype.fork = function() { return this.states = new o(this), this.head = this.tail = new p(l, 0, 0), this.len = 0, this }, t.prototype.reset = function() { return this.states ? (this.head = this.states.head, this.tail = this.states.tail, this.len = this.states.len, this.states = this.states.next) : (this.head = this.tail = new p(l, 0, 0), this.len = 0), this }, t.prototype.ldelim = function() { var _ = this.head, y = this.tail, T = this.len; return this.reset().uint32(T), T && (this.tail.next = _.next, this.tail = y, this.len += T), this }, t.prototype.finish = function() { for (var _ = this.head.next, y = this.constructor.alloc(this.len), T = 0; _;) _.fn(_.val, y, T), T += _.len, _ = _.next; return y }, t._configure = function(_) { u = _, t.create = e(), u._configure() } }, 3155: (b, n, a) => { b.exports = f; var u = a(1173); (f.prototype = Object.create(u.prototype)).constructor = f; var c = a(9693); function f() { u.call(this) } function s(h, p, l) { h.length < 40 ? c.utf8.write(h, p, l) : p.utf8Write ? 
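/* Writer details: uint32 picks the varint length up front, sint32/sint64 zigzag-encode
   ((n << 1) ^ (n >> 31)), and fork()/reset()/ldelim() implement nested length-delimited
   messages by snapshotting the op chain and prefixing its byte length before re-linking it;
   finish() allocates one buffer and replays the ops. Module 3155 (BufferWriter) begins
   here, switching to Buffer-specific byte/UTF-8 writes when available. */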
p.utf8Write(h, l) : p.write(h, l) } f._configure = function() { f.alloc = c._Buffer_allocUnsafe, f.writeBytesBuffer = c.Buffer && c.Buffer.prototype instanceof Uint8Array && c.Buffer.prototype.set.name === "set" ? function(h, p, l) { p.set(h, l) } : function(h, p, l) { if (h.copy) h.copy(p, l, 0, h.length); else for (var o = 0; o < h.length;) p[l++] = h[o++] } }, f.prototype.bytes = function(h) { c.isString(h) && (h = c._Buffer_from(h, "base64")); var p = h.length >>> 0; return this.uint32(p), p && this._push(f.writeBytesBuffer, p, h), this }, f.prototype.string = function(h) { var p = c.Buffer.byteLength(h); return this.uint32(p), p && this._push(s, p, h), this }, f._configure() }, 7714: (b, n, a) => { n.R = void 0; const u = a(6919), c = a(7448); n.R = new class { async init() {} async createSessionHandler(f, s) { const h = new u.Session(s); return await h.loadModel(f), new c.OnnxjsSessionHandler(h) } } }, 4200: (b, n, a) => { n.c8 = n.rX = void 0; const u = a(1670), c = a(5381), f = a(2157), s = a(2306); n.rX = () => { if ((typeof u.env.wasm.initTimeout != "number" || u.env.wasm.initTimeout < 0) && (u.env.wasm.initTimeout = 0), typeof u.env.wasm.simd != "boolean" && (u.env.wasm.simd = !0), typeof u.env.wasm.proxy != "boolean" && (u.env.wasm.proxy = !1), typeof u.env.wasm.numThreads != "number" || !Number.isInteger(u.env.wasm.numThreads) || u.env.wasm.numThreads <= 0) { const h = typeof navigator > "u" ? (0, c.cpus)().length : navigator.hardwareConcurrency; u.env.wasm.numThreads = Math.min(4, Math.ceil((h || 1) / 2)) } }, n.c8 = new class { async init() { (0, n.rX)(), await (0, f.initWasm)() } async createSessionHandler(h, p) { const l = new s.OnnxruntimeWebAssemblySessionHandler; return await l.loadModel(h, p), Promise.resolve(l) } } }, 6018: function(b, n, a) { var u = this && this.__createBinding || (Object.create ? function(s, h, p, l) { l === void 0 && (l = p); var o = Object.getOwnPropertyDescriptor(h, p); o && !("get" in o ? !h.__esModule : o.writable || o.configurable) || (o = { enumerable: !0, get: function() { return h[p] } }), Object.defineProperty(s, l, o) } : function(s, h, p, l) { l === void 0 && (l = p), s[l] = h[p] }), c = this && this.__exportStar || function(s, h) { for (var p in s) p === "default" || Object.prototype.hasOwnProperty.call(h, p) || u(h, s, p) }; Object.defineProperty(n, "__esModule", { value: !0 }), c(a(1670), n); const f = a(1670); { const s = a(7714).R; (0, f.registerBackend)("webgl", s, -10) } { const s = a(4200).c8; (0, f.registerBackend)("cpu", s, 10), (0, f.registerBackend)("wasm", s, 10), (0, f.registerBackend)("xnnpack", s, 9) } }, 246: (b, n) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createAttributeWithCacheKey = void 0; class a { constructor(c) { Object.assign(this, c) } get cacheKey() { return this._cacheKey || (this._cacheKey = Object.getOwnPropertyNames(this).sort().map(c => `${this[c]}`).join(";")), this._cacheKey } } n.createAttributeWithCacheKey = u => new a(u) }, 7778: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.Attribute = void 0; const u = a(1446), c = a(9395), f = a(9162), s = a(2517); var h = c.onnxruntime.experimental.fbs; class p { constructor(o) { if (this._attributes = new Map, o != null) { for (const t of o) t instanceof u.onnx.AttributeProto ? 
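/* onnxruntime-web glue: module 7714 wraps the onnxjs Session as the "webgl" session
   handler, 4200 initializes the wasm backend (defaults: simd on, proxy off, numThreads =
   min(4, ceil(cores / 2))), and 6018 registers backends with priorities webgl -10,
   cpu/wasm 10, xnnpack 9. Module 246 adds cacheKey support for attribute objects, and 7778
   is the onnxjs Attribute class, which accepts both onnx.AttributeProto and ORT
   flatbuffers attributes. */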
this._attributes.set(t.name, [p.getValue(t), p.getType(t)]) : t instanceof h.Attribute && this._attributes.set(t.name(), [p.getValue(t), p.getType(t)]); if (this._attributes.size < o.length) throw new Error("duplicated attribute names") } } set(o, t, e) { this._attributes.set(o, [e, t]) } delete(o) { this._attributes.delete(o) } getFloat(o, t) { return this.get(o, "float", t) } getInt(o, t) { return this.get(o, "int", t) } getString(o, t) { return this.get(o, "string", t) } getTensor(o, t) { return this.get(o, "tensor", t) } getFloats(o, t) { return this.get(o, "floats", t) } getInts(o, t) { return this.get(o, "ints", t) } getStrings(o, t) { return this.get(o, "strings", t) } getTensors(o, t) { return this.get(o, "tensors", t) } get(o, t, e) { const r = this._attributes.get(o); if (r === void 0) { if (e !== void 0) return e; throw new Error(`required attribute not found: ${o}`) } if (r[1] !== t) throw new Error(`type mismatch: expected ${t} but got ${r[1]}`); return r[0] } static getType(o) { const t = o instanceof u.onnx.AttributeProto ? o.type : o.type(); switch (t) { case u.onnx.AttributeProto.AttributeType.FLOAT: return "float"; case u.onnx.AttributeProto.AttributeType.INT: return "int"; case u.onnx.AttributeProto.AttributeType.STRING: return "string"; case u.onnx.AttributeProto.AttributeType.TENSOR: return "tensor"; case u.onnx.AttributeProto.AttributeType.FLOATS: return "floats"; case u.onnx.AttributeProto.AttributeType.INTS: return "ints"; case u.onnx.AttributeProto.AttributeType.STRINGS: return "strings"; case u.onnx.AttributeProto.AttributeType.TENSORS: return "tensors"; default: throw new Error(`attribute type is not supported yet: ${u.onnx.AttributeProto.AttributeType[t]}`) } } static getValue(o) { const t = o instanceof u.onnx.AttributeProto ? o.type : o.type(); if (t === u.onnx.AttributeProto.AttributeType.GRAPH || t === u.onnx.AttributeProto.AttributeType.GRAPHS) throw new Error("graph attribute is not supported yet"); const e = this.getValueNoCheck(o); if (t === u.onnx.AttributeProto.AttributeType.INT && s.LongUtil.isLong(e)) return s.LongUtil.longToNumber(e); if (t === u.onnx.AttributeProto.AttributeType.INTS) { const r = e, i = new Array(r.length); for (let d = 0; d < r.length; d++) { const g = r[d]; i[d] = s.LongUtil.longToNumber(g) } return i } if (t === u.onnx.AttributeProto.AttributeType.TENSOR) return o instanceof u.onnx.AttributeProto ? f.Tensor.fromProto(e) : f.Tensor.fromOrtTensor(e); if (t === u.onnx.AttributeProto.AttributeType.TENSORS) { if (o instanceof u.onnx.AttributeProto) return e.map(r => f.Tensor.fromProto(r)); if (o instanceof h.Attribute) return e.map(r => f.Tensor.fromOrtTensor(r)) } if (t === u.onnx.AttributeProto.AttributeType.STRING && o instanceof u.onnx.AttributeProto) { const r = e; return (0, s.decodeUtf8String)(r) } return t === u.onnx.AttributeProto.AttributeType.STRINGS && o instanceof u.onnx.AttributeProto ? e.map(s.decodeUtf8String) : e } static getValueNoCheck(o) { return o instanceof u.onnx.AttributeProto ? 
this.getValueNoCheckFromOnnxFormat(o) : this.getValueNoCheckFromOrtFormat(o) } static getValueNoCheckFromOnnxFormat(o) { switch (o.type) { case u.onnx.AttributeProto.AttributeType.FLOAT: return o.f; case u.onnx.AttributeProto.AttributeType.INT: return o.i; case u.onnx.AttributeProto.AttributeType.STRING: return o.s; case u.onnx.AttributeProto.AttributeType.TENSOR: return o.t; case u.onnx.AttributeProto.AttributeType.GRAPH: return o.g; case u.onnx.AttributeProto.AttributeType.FLOATS: return o.floats; case u.onnx.AttributeProto.AttributeType.INTS: return o.ints; case u.onnx.AttributeProto.AttributeType.STRINGS: return o.strings; case u.onnx.AttributeProto.AttributeType.TENSORS: return o.tensors; case u.onnx.AttributeProto.AttributeType.GRAPHS: return o.graphs; default: throw new Error(`unsupported attribute type: ${u.onnx.AttributeProto.AttributeType[o.type]}`) } } static getValueNoCheckFromOrtFormat(o) { switch (o.type()) { case h.AttributeType.FLOAT: return o.f(); case h.AttributeType.INT: return o.i(); case h.AttributeType.STRING: return o.s(); case h.AttributeType.TENSOR: return o.t(); case h.AttributeType.GRAPH: return o.g(); case h.AttributeType.FLOATS: return o.floatsArray(); case h.AttributeType.INTS: { const t = []; for (let e = 0; e < o.intsLength(); e++) t.push(o.ints(e)); return t } case h.AttributeType.STRINGS: { const t = []; for (let e = 0; e < o.stringsLength(); e++) t.push(o.strings(e)); return t } case h.AttributeType.TENSORS: { const t = []; for (let e = 0; e < o.tensorsLength(); e++) t.push(o.tensors(e)); return t } default: throw new Error(`unsupported attribute type: ${h.AttributeType[o.type()]}`) } } } n.Attribute = p }, 7091: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.resolveBackend = n.backend = void 0; const u = a(5038), c = new Map; async function f(s) { const h = n.backend; if (h[s] !== void 0 && function(p) { const l = p; return "initialize" in l && typeof l.initialize == "function" && "createSessionHandler" in l && typeof l.createSessionHandler == "function" && "dispose" in l && typeof l.dispose == "function" }(h[s])) { const p = h[s]; let l = p.initialize(); if (typeof l == "object" && "then" in l && (l = await l), l) return c.set(s, p), p } } n.backend = { webgl: new u.WebGLBackend }, n.resolveBackend = async function s(h) { if (!h) return s(["webgl"]); { const p = typeof h == "string" ? 
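/* Module 7091 is the onnxjs-side backend resolver: it duck-types a candidate backend
   (initialize/createSessionHandler/dispose), awaits initialize() if it returns a promise,
   caches successful backends, and defaults to trying ["webgl"] when no hint is given. */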
[h] : h; for (const l of p) { const o = c.get(l); if (o) return o; const t = await f(l); if (t) return t } } throw new Error("no available backend to use") } }, 5038: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.WebGLBackend = void 0; const u = a(1670), c = a(6231), f = a(6416), s = a(7305); n.WebGLBackend = class { get contextId() { return u.env.webgl.contextId } set contextId(h) { u.env.webgl.contextId = h } get matmulMaxBatchSize() { return u.env.webgl.matmulMaxBatchSize } set matmulMaxBatchSize(h) { u.env.webgl.matmulMaxBatchSize = h } get textureCacheMode() { return u.env.webgl.textureCacheMode } set textureCacheMode(h) { u.env.webgl.textureCacheMode = h } get pack() { return u.env.webgl.pack } set pack(h) { u.env.webgl.pack = h } get async() { return u.env.webgl.async } set async(h) { u.env.webgl.async = h } initialize() { try { return this.glContext = (0, s.createWebGLContext)(this.contextId), typeof this.matmulMaxBatchSize != "number" && (this.matmulMaxBatchSize = 16), typeof this.textureCacheMode != "string" && (this.textureCacheMode = "full"), typeof this.pack != "boolean" && (this.pack = !1), typeof this.async != "boolean" && (this.async = !1), c.Logger.setWithEnv(u.env), c.Logger.verbose("WebGLBackend", `Created WebGLContext: ${typeof this.glContext} with matmulMaxBatchSize: ${this.matmulMaxBatchSize}; textureCacheMode: ${this.textureCacheMode}; pack: ${this.pack}; async: ${this.async}.`), !0 } catch (h) { return c.Logger.warning("WebGLBackend", `Unable to initialize WebGLBackend. ${h}`), !1 } } createSessionHandler(h) { return new f.WebGLSessionHandler(this, h) } dispose() { this.glContext.dispose() } } }, 5107: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.CoordsGlslLib = void 0; const u = a(2517), c = a(8520), f = a(5060), s = a(7859), h = a(9390); class p extends c.GlslLib { constructor(o) { super(o) } getFunctions() { return Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({}, this.offsetToCoords()), this.coordsToOffset()), this.toVec()), this.valueFrom()), this.getCommonUtilFuncs()), this.getInputsSamplingSnippets()), this.getOutputSamplingSnippet()) } getCustomTypes() { return {} } offsetToCoords() { return { offsetToCoords: new c.GlslLibRoutine(` vec2 offsetToCoords(int offset, int width, int height) { int t = offset / width; int s = offset - t*width; vec2 coords = (vec2(s,t) + vec2(0.5,0.5)) / vec2(width, height); return coords; } `) } } coordsToOffset() { return { coordsToOffset: new c.GlslLibRoutine(` int coordsToOffset(vec2 coords, int width, int height) { float s = coords.s * float(width); float t = coords.t * float(height); int offset = int(t) * width + int(s); return offset; } `) } } getOutputSamplingSnippet() { const o = this.context.outputTextureLayout; return o.isPacked ? 
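/* Module 5038 (WebGLBackend) proxies env.webgl settings and fills in defaults on
   initialize() (matmulMaxBatchSize 16, textureCacheMode "full", pack/async false),
   returning false so the resolver can fall through if no WebGL context can be created.
   Module 5107 (CoordsGlslLib) generates the GLSL routines that map between flat tensor
   offsets, logical coordinates, and texture UVs (offsetToCoords/coordsToOffset and the
   getOutputCoords variants that follow). */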
this.getPackedOutputSamplingSnippet(o) : this.getUnpackedOutputSamplingSnippet(o) } getPackedOutputSamplingSnippet(o) { const t = o.unpackedShape, e = [o.width, o.height], r = {}, i = "getOutputCoords"; switch (t.length) { case 0: r[i] = this.getOutputScalarCoords(); break; case 1: r[i] = this.getOutputPacked1DCoords(t, e); break; case 2: r[i] = this.getOutputPacked2DCoords(t, e); break; case 3: r[i] = this.getOutputPacked3DCoords(t, e); break; default: r[i] = this.getOutputPackedNDCoords(t, e) } const d = ` void setOutput(vec4 val) { ${(0,f.getGlsl)(this.context.glContext.version).output} = val; } `; return r.floatTextureSetRGBA = new c.GlslLibRoutine(d), r } getUnpackedOutputSamplingSnippet(o) { const t = o.unpackedShape, e = [o.width, o.height], r = {}, i = "getOutputCoords"; switch (t.length) { case 0: r[i] = this.getOutputScalarCoords(); break; case 1: r[i] = this.getOutputUnpacked1DCoords(t, e); break; case 2: r[i] = this.getOutputUnpacked2DCoords(t, e); break; case 3: r[i] = this.getOutputUnpacked3DCoords(t, e); break; case 4: r[i] = this.getOutputUnpacked4DCoords(t, e); break; case 5: r[i] = this.getOutputUnpacked5DCoords(t, e); break; case 6: r[i] = this.getOutputUnpacked6DCoords(t, e); break; default: throw new Error(`Unsupported output dimensionality: ${t.length}`) } const d = ` void setOutput(float val) { ${(0,f.getGlsl)(this.context.glContext.version).output} = vec4(val, 0, 0, 0); } `; return r.floatTextureSetR = new c.GlslLibRoutine(d), r } getOutputScalarCoords() { return new c.GlslLibRoutine(` int getOutputCoords() { return 0; } `) } getOutputPacked1DCoords(o, t) { const e = t; let r = ""; return e[0] === 1 ? (r = ` int getOutputCoords() { return 2 * int(TexCoords.y * ${e[1]}.0); } `, new c.GlslLibRoutine(r)) : e[1] === 1 ? (r = ` int getOutputCoords() { return 2 * int(TexCoords.x * ${e[0]}.0); } `, new c.GlslLibRoutine(r)) : (r = ` int getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${e[0]}, ${e[1]})); return 2 * (resTexRC.y * ${e[0]} + resTexRC.x); } `, new c.GlslLibRoutine(r)) } getOutputPacked2DCoords(o, t) { let e = ""; if (u.ArrayUtil.arraysEqual(o, t)) return e = ` ivec2 getOutputCoords() { return 2 * ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); } `, new c.GlslLibRoutine(e); const r = t, i = Math.ceil(o[1] / 2); return e = ` ivec2 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${r[0]}, ${r[1]})); int index = resTexRC.y * ${r[0]} + resTexRC.x; // reverse r and c order for packed texture int r = imod(index, ${i}) * 2; int c = 2 * (index / ${i}); return ivec2(r, c); } `, new c.GlslLibRoutine(e) } getOutputPacked3DCoords(o, t) { const e = [t[0], t[1]], r = Math.ceil(o[2] / 2), i = r * Math.ceil(o[1] / 2), d = ` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${e[0]}, ${e[1]})); int index = resTexRC.y * ${e[0]} + resTexRC.x; int b = index / ${i}; index -= b * ${i}; // reverse r and c order for packed texture int r = imod(index, ${r}) * 2; int c = 2 * (index / ${r}); return ivec3(b, r, c); } `; return new c.GlslLibRoutine(d) } getOutputPackedNDCoords(o, t) { const e = [t[0], t[1]], r = Math.ceil(o[o.length - 1] / 2), i = r * Math.ceil(o[o.length - 2] / 2); let d = i, g = "", m = "b, r, c"; for (let y = 2; y < o.length - 1; y++) d *= o[o.length - y - 1], g = ` int b${y} = index / ${d}; index -= b${y} * ${d}; ` + g, m = `b${y}, ` + m; const _ = ` ivec${o.length} getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${e[0]}, ${e[1]})); int index = resTexRC.y * ${e[0]} + resTexRC.x; ${g} int b = index / ${i}; index 
-= b * ${i}; // reverse r and c order for packed texture int r = imod(index, ${r}) * 2; int c = 2 * (index / ${r}); return ivec${o.length}(${m}); } `; return new c.GlslLibRoutine(_) } getOutputUnpacked1DCoords(o, t) { const e = ` int getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); return resTexRC.y * ${t[0]} + resTexRC.x; } `; return new c.GlslLibRoutine(e) } getOutputUnpacked2DCoords(o, t) { const e = ` ivec2 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); int index = resTexRC.y * ${t[0]} + resTexRC.x; int r = index / ${o[1]}; int c = index - r * ${o[1]}; return ivec2(r, c); } `; return new c.GlslLibRoutine(e) } getOutputUnpacked3DCoords(o, t) { let e = ""; const r = o.length; let i = null; r < 2 && (i = []), i = new Array(r - 1), i[r - 2] = o[r - 1]; for (let m = r - 3; m >= 0; --m) i[m] = i[m + 1] * o[m + 1]; const d = ["r", "c", "d"], g = i.map((m, _) => `int ${d[_]} = index / ${m}; ${_===i.length-1?`int ${d[_+1]} = index - ${d[_]} * ${m}`:`index -= ${d[_]} * ${m}`};`).join(""); return e = ` ivec3 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); int index = resTexRC.y * ${t[0]} + resTexRC.x; ${g} return ivec3(r, c, d); } `, new c.GlslLibRoutine(e) } getOutputUnpacked4DCoords(o, t) { let e = ""; const r = o.length; let i = null; r < 2 && (i = []), i = new Array(r - 1), i[r - 2] = o[r - 1]; for (let m = r - 3; m >= 0; --m) i[m] = i[m + 1] * o[m + 1]; const d = ["r", "c", "d", "d2"], g = i.map((m, _) => `int ${d[_]} = index / ${m}; ${_===i.length-1?`int ${d[_+1]} = index - ${d[_]} * ${m}`:`index -= ${d[_]} * ${m}`};`).join(""); return e = ` ivec4 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); int index = resTexRC.y * ${t[0]} + resTexRC.x; ${g} return ivec4(r, c, d, d2); } `, new c.GlslLibRoutine(e) } getOutputUnpacked5DCoords(o, t) { let e = ""; const r = o.length; let i = null; r < 2 && (i = []), i = new Array(r - 1), i[r - 2] = o[r - 1]; for (let m = r - 3; m >= 0; --m) i[m] = i[m + 1] * o[m + 1]; const d = ["r", "c", "d", "d2", "d3"], g = i.map((m, _) => `int ${d[_]} = index / ${m}; ${_===i.length-1?`int ${d[_+1]} = index - ${d[_]} * ${m}`:`index -= ${d[_]} * ${m}`};`).join(""); return e = ` ivec5 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); int index = resTexRC.y * ${t[0]} + resTexRC.x; ${g} return ivec5(r, c, d, d2, d3); } `, new c.GlslLibRoutine(e) } getOutputUnpacked6DCoords(o, t) { let e = ""; const r = o.length; let i = null; r < 2 && (i = []), i = new Array(r - 1), i[r - 2] = o[r - 1]; for (let m = r - 3; m >= 0; --m) i[m] = i[m + 1] * o[m + 1]; const d = ["r", "c", "d", "d2", "d3", "d4"], g = i.map((m, _) => `int ${d[_]} = index / ${m}; ${_===i.length-1?`int ${d[_+1]} = index - ${d[_]} * ${m}`:`index -= ${d[_]} * ${m}`};`).join(""); return e = ` ivec6 getOutputCoords() { ivec2 resTexRC = ivec2(TexCoords.xy * vec2(${t[0]}, ${t[1]})); int index = resTexRC.y * ${t[0]} + resTexRC.x; ${g} return ivec6(r, c, d, d2, d3, d4); } `, new c.GlslLibRoutine(e) } getCommonUtilFuncs() { const o = {}; let t = "uvFromFlat"; o[t] = new c.GlslLibRoutine(` vec2 uvFromFlat(int texNumR, int texNumC, int index) { int texC = index / texNumR; int texR = index - texC * texNumR; // TODO: swap texR, texC order in following function so row is corresponding to u and column is corresponding to // v. 
return (vec2(texR, texC) + halfCR) / vec2(texNumR, texNumC); } `), t = "packedUVfrom1D", o[t] = new c.GlslLibRoutine(` vec2 packedUVfrom1D(int texNumR, int texNumC, int index) { int texelIndex = index / 2; int texR = texelIndex / texNumC; int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } `), t = "packedUVfrom2D", o[t] = new c.GlslLibRoutine(` vec2 packedUVfrom2D(int texNumR, int texNumC, int texelsInLogicalRow, int row, int col) { int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2); int texR = texelIndex / texNumC; int texC = texelIndex - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } `), t = "packedUVfrom3D", o[t] = new c.GlslLibRoutine(` vec2 packedUVfrom3D(int texNumR, int texNumC, int texelsInBatch, int texelsInLogicalRow, int b, int row, int col) { int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2); int texR = index / texNumC; int texC = index - texR * texNumC; return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); } `), t = "sampleTexture"; const e = (0, f.getGlsl)(this.context.glContext.version); return o[t] = new c.GlslLibRoutine(` float sampleTexture(sampler2D textureSampler, vec2 uv) { return ${e.texture2D}(textureSampler, uv).r; }`), o } getInputsSamplingSnippets() { const o = {}, t = this.context.outputTextureLayout; return this.context.programInfo.inputNames.forEach((e, r) => { const i = this.context.inputTextureLayouts[r], d = (0, h.generateShaderFuncNameFromInputSamplerName)(e); i.isPacked ? o[d] = this.getPackedSamplerFromInput(d, e, i) : o[d] = this.getUnpackedSamplerFromInput(d, e, i); const g = (0, h.generateShaderFuncNameFromInputSamplerNameAtOutCoords)(e); i.unpackedShape.length <= t.unpackedShape.length && (i.isPacked ? o[g] = this.getPackedSamplerAtOutputCoords(g, i, t, e) : o[g] = this.getUnpackedSamplerAtOutputCoords(g, i, t, e)) }), o } getPackedSamplerAtOutputCoords(o, t, e, r) { const i = t.unpackedShape, d = e.unpackedShape, g = r, m = (0, h.generateShaderFuncNameFromInputSamplerName)(g), _ = i.length, y = d.length, T = u.BroadcastUtil.getBroadcastDims(i, d), w = (0, h.getCoordsDataType)(y), S = y - _; let O; const E = (0, h.getGlChannels)(); O = _ === 0 ? "" : y < 2 && T.length >= 1 ? "coords = 0;" : T.map(k => `coords.${E[k+S]} = 0;`).join(` `); let v = ""; v = y < 2 && _ > 0 ? "coords" : i.map((k, Y) => `coords.${E[Y+S]}`).join(", "); let P = "return outputValue;"; const L = u.ShapeUtil.size(i) === 1, V = u.ShapeUtil.size(d) === 1; if (_ !== 1 || L || V) { if (L && !V) P = y === 1 ? ` return vec4(outputValue.x, outputValue.x, 0., 0.); ` : ` return vec4(outputValue.x); `; else if (T.length) { const k = _ - 2, Y = _ - 1; T.indexOf(k) > -1 && T.indexOf(Y) > -1 ? P = "return vec4(outputValue.x);" : T.indexOf(k) > -1 ? 
P = "return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);" : T.indexOf(Y) > -1 && (P = "return vec4(outputValue.xx, outputValue.zz);") } } else P = ` return vec4(outputValue.xy, outputValue.xy); `; const R = ` vec4 ${o}() { ${w} coords = getOutputCoords(); int lastDim = coords.${E[y-1]}; coords.${E[y-1]} = coords.${E[y-2]}; coords.${E[y-2]} = lastDim; ${O} vec4 outputValue = ${m}(${v}); ${P} } `; return new c.GlslLibRoutine(R, ["coordinates.getOutputCoords"]) } getUnpackedSamplerAtOutputCoords(o, t, e, r) { const i = [e.width, e.height], d = [t.width, t.height], g = t.unpackedShape.length, m = e.unpackedShape.length, _ = t.unpackedShape, y = e.unpackedShape, T = (0, h.generateShaderFuncNameFromInputSamplerName)(r); if (g === m && u.ArrayUtil.arraysEqual(d, i)) { const V = ` float ${o}() { return sampleTexture(${r}, TexCoords); } `; return new c.GlslLibRoutine(V, ["coordinates.sampleTexture"]) } const w = (0, h.getCoordsDataType)(m), S = u.BroadcastUtil.getBroadcastDims(_, y), O = m - g; let E; const v = (0, h.getGlChannels)(); E = g === 0 ? "" : m < 2 && S.length >= 1 ? "coords = 0;" : S.map(V => `coords.${v[V+O]} = 0;`).join(` `); let P = ""; P = m < 2 && g > 0 ? "coords" : t.unpackedShape.map((V, R) => `coords.${v[R+O]}`).join(", "); const L = ` float ${o}() { ${w} coords = getOutputCoords(); ${E} return ${T}(${P}); } `; return new c.GlslLibRoutine(L, ["coordinates.getOutputCoords"]) } getPackedSamplerFromInput(o, t, e) { switch (e.unpackedShape.length) { case 0: return this.getPackedSamplerScalar(o, t); case 1: return this.getPackedSampler1D(o, t, e); case 2: return this.getPackedSampler2D(o, t, e); case 3: return this.getPackedSampler3D(o, t, e); default: return this.getPackedSamplerND(o, t, e) } } getUnpackedSamplerFromInput(o, t, e) { const r = e.unpackedShape; switch (r.length) { case 0: return this.getUnpackedSamplerScalar(o, t, e); case 1: return this.getUnpackedSampler1D(o, t, e); case 2: return this.getUnpackedSampler2D(o, t, e); case 3: return this.getUnpackedSampler3D(o, t, e); case 4: return this.getUnpackedSampler4D(o, t, e); case 5: return this.getUnpackedSampler5D(o, t, e); case 6: return this.getUnpackedSampler6D(o, t, e); default: throw new Error(`Unsupported dimension ${r.length}-D`) } } getPackedSamplerScalar(o, t) { const e = ` vec4 ${o}() { return ${(0,f.getGlsl)(this.context.glContext.version).texture2D}(${t}, halfCR); } `; return new c.GlslLibRoutine(e) } getPackedSampler1D(o, t, e) { const r = [e.width, e.height], i = [r[1], r[0]], d = (0, f.getGlsl)(this.context.glContext.version), g = `vec4 ${o}(int index) { vec2 uv = packedUVfrom1D( ${i[0]}, ${i[1]}, index); return ${d.texture2D}(${t}, uv); }`; return new c.GlslLibRoutine(g, ["coordinates.packedUVfrom1D"]) } getPackedSampler2D(o, t, e) { const r = e.unpackedShape, i = [e.width, e.height], d = (0, f.getGlsl)(this.context.glContext.version), g = i[0], m = i[1]; if (i != null && u.ArrayUtil.arraysEqual(r, i)) { const w = `vec4 ${o}(int row, int col) { vec2 uv = (vec2(col, row) + halfCR) / vec2(${m}.0, ${g}.0); return ${d.texture2D}(${t}, uv); }`; return new c.GlslLibRoutine(w) } const _ = i, y = Math.ceil(r[1] / 2), T = `vec4 ${o}(int row, int col) { vec2 uv = packedUVfrom2D(${_[1]}, ${_[0]}, ${y}, row, col); return ${d.texture2D}(${t}, uv); }`; return new c.GlslLibRoutine(T, ["coordinates.packedUVfrom2D"]) } getPackedSampler3D(o, t, e) { const r = e.unpackedShape, i = [e.width, e.height], d = [i[0], i[1]], g = (0, f.getGlsl)(this.context.glContext.version); if (r[0] === 1) { const w = 
r.slice(1), S = [1, 2], O = (0, h.squeezeInputShape)(r, w), E = ["b", "row", "col"], v = JSON.parse(JSON.stringify(e)); v.unpackedShape = O; const P = this.getPackedSamplerFromInput(o, t, v), L = `${P.routineBody} vec4 ${o}(int b, int row, int col) { return ${o}(${(0,h.getSqueezedParams)(E,S)}); } `; return new c.GlslLibRoutine(L, P.dependencies) } const m = d[0], _ = d[1], y = Math.ceil(r[2] / 2), T = `vec4 ${o}(int b, int row, int col) { vec2 uv = packedUVfrom3D( ${_}, ${m}, ${y*Math.ceil(r[1]/2)}, ${y}, b, row, col); return ${g.texture2D}(${t}, uv);}`; return new c.GlslLibRoutine(T, ["coordinates.packedUVfrom3D"]) } getPackedSamplerND(o, t, e) { const r = e.unpackedShape, i = r.length, d = [e.width, e.height], g = (0, f.getGlsl)(this.context.glContext.version), m = [d[0], d[1]], _ = m[1], y = m[0], T = Math.ceil(r[i - 1] / 2); let w = T * Math.ceil(r[i - 2] / 2), S = "int b, int row, int col", O = `b * ${w} + (row / 2) * ${T} + (col / 2)`; for (let v = 2; v < i - 1; v++) S = `int b${v}, ` + S, w *= r[i - v - 1], O = `b${v} * ${w} + ` + O; const E = `vec4 ${o}(${S}) { int index = ${O}; int texR = index / ${y}; int texC = index - texR * ${y}; vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${y}, ${_}); return ${g.texture2D}(${t}, uv); }`; return new c.GlslLibRoutine(E) } getUnpackedSamplerScalar(o, t, e) { const [r, i] = [e.width, e.height]; if (r === 1 && i === 1) { const g = ` float ${o}() { return sampleTexture(${t}, halfCR); } `; return new c.GlslLibRoutine(g, ["coordinates.sampleTexture"]) } const d = ` float ${o}() { int offset_${t} = coordsToOffset(TexCoords, ${r}, ${i}); vec2 uv = uvFromFlat(${r}, ${i}, offset_${t}); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(d, ["coordinates.uvFromFlat", "coordinates.sampleTexture", "coordinates.coordsToOffset"]) } getUnpackedSampler1D(o, t, e) { const r = e.width, i = e.height; if (i === 1 && r === 1) { const g = ` float ${o}(int index) { return sampleTexture(${t}, halfCR); } `; return new c.GlslLibRoutine(g, ["coordinates.sampleTexture"]) } if (i === 1) { const g = ` float ${o}(int index) { vec2 uv = vec2((float(index) + 0.5) / ${r}.0, 0.5); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(g, ["coordinates.sampleTexture"]) } if (r === 1) { const g = ` float ${o}(int index) { vec2 uv = vec2(0.5, (float(index) + 0.5) / ${i}.0); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(g, ["coordinates.sampleTexture"]) } const d = ` float ${o}(int index) { vec2 uv = uvFromFlat(${r}, ${i}, index); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(d, ["coordinates.uvFromFlat", "coordinates.sampleTexture"]) } getUnpackedSampler2D(o, t, e) { const r = e.unpackedShape, i = [e.height, e.width]; if (i != null && u.ArrayUtil.arraysEqual(r, i)) { const w = ` float ${o}(int row, int col) { vec2 uv = (vec2(row, col) + halfCR) / vec2(${i[1]}.0, ${i[0]}.0); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(w, ["coordinates.sampleTexture"]) } const { newShape: d, keptDims: g } = (0, s.squeezeShape)(r), m = d; if (m.length < r.length) { const w = (0, h.squeezeInputShape)(r, m), S = JSON.parse(JSON.stringify(e)); S.unpackedShape = w; const O = ["col", "row"], E = ` ${this.getUnpackedSamplerFromInput(o,t,S).routineBody} float ${o}(int row, int col) { return ${o}(${(0,h.getSqueezedParams)(O,g)}); } `; return new c.GlslLibRoutine(E, ["coordinates.sampleTexture"]) } const _ = i[1], y = i[0]; if (y === 1) { const w = ` float ${o}(int row, int col) { int offset_${t} = coordsToOffset(TexCoords, 
${_}, ${y}); float index = dot(vec3(row, col, offset_${t}), vec3(${r[1]}, 1, 1)); vec2 uv = vec2(0.5, (index + 0.5) / ${_}.0); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(w, ["coordinates.sampleTexture", "coordinates.coordsToOffset"]) } if (_ === 1) { const w = ` float ${o}(int row, int col) { int offset_${t} = coordsToOffset(TexCoords, ${_}, ${y}); float index = dot(vec3(row, col, offset_${t}), vec3(${r[1]}, 1, 1)); vec2 uv = vec2((index + 0.5) / ${y}.0, 0.5); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(w, ["coordinates.sampleTexture", "coordinates.coordsToOffset"]) } const T = ` float ${o}(int row, int col) { int index = col * ${r[1]} + row; vec2 uv = uvFromFlat(${_}, ${y}, index); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(T, ["coordinates.uvFromFlat", "coordinates.sampleTexture", "coordinates.coordsToOffset"]) } getUnpackedSampler3D(o, t, e) { const r = e.unpackedShape, i = r[1] * r[2], d = r[2], { newShape: g, keptDims: m } = (0, s.squeezeShape)(r), _ = g; if (_.length < r.length) { const T = (0, h.squeezeInputShape)(r, _), w = ["batch", "col", "row"], S = JSON.parse(JSON.stringify(e)); S.unpackedShape = T; const O = this.getUnpackedSamplerFromInput(o, t, S), E = m.reverse(), v = ` ${O.routineBody} float ${o}(int batch, int row, int col) { return ${o}(${(0,h.getSqueezedParams)(w,E)}); } `; return new c.GlslLibRoutine(v, O.dependencies) } const y = ` float ${o}(int depth, int row, int col) { // Explicitly use integer operations as dot() only works on floats. int index = depth * ${i} + col * ${d} + row; vec2 uv = uvFromFlat(${e.width}, ${e.height}, index); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(y, ["coordinates.uvFromFlat", "coordinates.sampleTexture", "coordinates.coordsToOffset"]) } getUnpackedSampler4D(o, t, e) { const r = e.unpackedShape, i = r[3], d = r[2] * i, g = ` float ${o}(int row, int col, int depth, int depth2) { int index = row * ${r[1]*d} + col * ${d} + depth2 * ${i} + depth; vec2 uv = uvFromFlat(${e.width}, ${e.height}, index); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(g, ["coordinates.uvFromFlat", "coordinates.sampleTexture"]) } getUnpackedSampler5D(o, t, e) { const r = e.unpackedShape, i = r[4], d = r[3] * i, g = r[2] * d, m = r[1] * g, { newShape: _, keptDims: y } = (0, s.squeezeShape)(r); if (_.length < r.length) { const w = (0, h.squeezeInputShape)(r, _), S = ["row", "col", "depth", "depth2", "depth3"], O = JSON.parse(JSON.stringify(e)); O.unpackedShape = w; const E = ` ${this.getUnpackedSamplerFromInput(o,t,O).routineBody} float ${o}(int row, int col, int depth, int depth2, int depth3) { return ${o}(${(0,h.getSqueezedParams)(S,y)}); } `; return new c.GlslLibRoutine(E, ["coordinates.sampleTexture", "coordinates.uvFromFlat"]) } const T = ` float ${o}(int row, int col, int depth, int depth2, int depth3) { int index = row * ${m} + col * ${g} + depth * ${d} + depth3 * ${i} + depth2; vec2 uv = uvFromFlat(${e.width}, ${e.height}, index); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(T, ["coordinates.sampleTexture", "coordinates.uvFromFlat"]) } getUnpackedSampler6D(o, t, e) { const r = e.unpackedShape, i = r[5], d = r[4] * i, g = r[3] * d, m = r[2] * g, _ = r[1] * m, { newShape: y, keptDims: T } = (0, s.squeezeShape)(r); if (y.length < r.length) { const S = (0, h.squeezeInputShape)(r, y), O = ["row", "col", "depth", "depth2", "depth3", "depth4"], E = JSON.parse(JSON.stringify(e)); E.unpackedShape = S; const v = ` 
${this.getUnpackedSamplerFromInput(o,t,E).routineBody} float ${o}(int row, int col, int depth, int depth2, int depth3, int depth4) { return ${o}(${(0,h.getSqueezedParams)(O,T)}); } `; return new c.GlslLibRoutine(v, ["coordinates.sampleTexture", "coordinates.uvFromFlat"]) } const w = ` float ${o}(int row, int col, int depth, int depth2, int depth3, int depth4) { int index = row * ${_} + col * ${m} + depth * ${g} + depth2 * ${d} + depth3 * ${i} + depth4; vec2 uv = uvFromFlat(${e.width}, ${e.height}, index); return sampleTexture(${t}, uv); } `; return new c.GlslLibRoutine(w, ["coordinates.uvFromFlat", "coordinates.sampleTexture", "coordinates.coordsToOffset"]) } toVec() { const o = this.context.outputTextureLayout, t = o.shape.length, e = o.strides, r = o.width, i = o.height, d = []; for (let m = 0; m < t - 1; ++m) d.push(` c[${m}] = offset / ${e[m]};`), d.push(` offset -= c[${m}] * ${e[m]};`); d.push(` c[${t-1}] = offset;`); const g = ` void toVec(vec2 texCoords, out int c[${t}]) { int offset = coordsToOffset(texCoords, ${r}, ${i}); ${d.join("")} } void toVec(int offset, out int c[${t}]) { ${d.join("")} } `; return { toVec: new c.GlslLibRoutine(g, ["coordinates.coordsToOffset"]) } } valueFrom() { const o = {}; return this.context.programInfo.inputNames.forEach((t, e) => { const r = this.context.inputTextureLayouts[e], i = (r.unpackedShape.length > 0 ? r.unpackedShape : r.shape).length; let d = `_${t}`; o[d] = new c.GlslLibRoutine(this.getValueFromSingle(t, i, r.width, r.height, !1), [`shapeUtils.indicesToOffset${d}`, "coordinates.offsetToCoords", "fragcolor.getColorAsFloat"]), d += "_T", o[d] = new c.GlslLibRoutine(this.getValueFromSingle(t, i, r.width, r.height, !0), [`shapeUtils.indicesToOffset${d}`, "coordinates.offsetToCoords", "fragcolor.getColorAsFloat"]) }), o } getValueFromSingle(o, t, e, r, i) { let d = `_${o}`; return i && (d += "_T"), ` float ${d}(int m[${t}]) { int offset = indicesToOffset${d}(m); vec2 coords = offsetToCoords(offset, ${e}, ${r}); float value = getColorAsFloat(${(0,f.getGlsl)(this.context.glContext.version).texture2D}(${o}, coords)); return value; } ` } getPackedValueFrom(o, t, e, r, i) { let d = `_${o}_Pack`; return i && (d += "_T"), ` vec4 ${d}(int m[${t}]) { int offset = indicesToOffset_${o}(m); vec2 coords = offsetToCoords(offset, ${e}, ${r}); return ${(0,f.getGlsl)(this.context.glContext.version).texture2D}(${o}, coords); } ` } } n.CoordsGlslLib = p }, 8520: (b, n) => { var a; Object.defineProperty(n, "__esModule", { value: !0 }), n.TopologicalSortGlslRoutines = n.GlslLibRoutineNode = n.GlslLibRoutine = n.GlslLib = n.GlslContext = n.FunctionType = void 0, (a = n.FunctionType || (n.FunctionType = {}))[a.ValueBased = 0] = "ValueBased", a[a.Positional = 1] = "Positional", n.GlslContext = class { constructor(u, c, f, s) { this.glContext = u, this.programInfo = c, this.inputTextureLayouts = f, this.outputTextureLayout = s } }, n.GlslLib = class { constructor(u) { this.context = u } }, n.GlslLibRoutine = class { constructor(u, c) { this.routineBody = u, this.dependencies = c } }, n.GlslLibRoutineNode = class { constructor(u, c, f) { this.name = u, this.dependencies = f || [], c && (this.routineBody = c) } addDependency(u) { u && this.dependencies.push(u) } }, n.TopologicalSortGlslRoutines = class { static returnOrderedNodes(u) { if (!u || u.length === 0) return []; if (u.length === 1) return u; const c = new Set, f = new Set, s = new Array; return this.createOrderedNodes(u, c, f, s), s } static createOrderedNodes(u, c, f, s) { for (let h = 0; h < u.length; ++h) 
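/* Module 8520 defines the GLSL library infrastructure: GlslContext/GlslLib/GlslLibRoutine,
   GlslLibRoutineNode for the dependency graph, and TopologicalSortGlslRoutines, a DFS-based
   topological sort (the traversal continues below) that throws on cyclic routine
   dependencies. */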
this.dfsTraverse(u[h], c, f, s) } static dfsTraverse(u, c, f, s) { if (!u || f.has(u.name)) return; if (c.has(u.name)) throw new Error("Cyclic dependency detected. Can't topologically sort routines needed for shader."); c.add(u.name); const h = u.dependencies; if (h && h.length > 0) for (let p = 0; p < h.length; ++p) this.dfsTraverse(h[p], c, f, s); s.push(u), f.add(u.name), c.delete(u.name) } } }, 7341: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.EncodingGlslLib = void 0; const u = a(8520); class c extends u.GlslLib { constructor(s) { super(s) } getFunctions() { return Object.assign(Object.assign({}, this.encodeFloat32()), this.decodeFloat32()) } getCustomTypes() { return {} } encodeFloat32() { return { encode: new u.GlslLibRoutine(`highp vec4 encode(highp float f) { return vec4(f, 0.0, 0.0, 0.0); } `) } } decodeFloat32() { return { decode: new u.GlslLibRoutine(`highp float decode(highp vec4 rgba) { return rgba.r; } `) } } encodeUint8() { const s = c.isLittleEndian() ? "rgba.rgba=rgba.abgr;" : ""; return { encode: new u.GlslLibRoutine(` highp vec4 encode(highp float f) { highp float F = abs(f); highp float Sign = step(0.0,-f); highp float Exponent = floor(log2(F)); highp float Mantissa = (exp2(- Exponent) * F); Exponent = floor(log2(F) + 127.0) + floor(log2(Mantissa)); highp vec4 rgba; rgba[0] = 128.0 * Sign + floor(Exponent*exp2(-1.0)); rgba[1] = 128.0 * mod(Exponent,2.0) + mod(floor(Mantissa*128.0),128.0); rgba[2] = floor(mod(floor(Mantissa*exp2(23.0 -8.0)),exp2(8.0))); rgba[3] = floor(exp2(23.0)*mod(Mantissa,exp2(-15.0))); ${s} rgba = rgba / 255.0; // values need to be normalized to [0,1] return rgba; } `) } } decodeUint8() { const s = c.isLittleEndian() ? "rgba.rgba=rgba.abgr;" : ""; return { decode: new u.GlslLibRoutine(` highp float decode(highp vec4 rgba) { rgba = rgba * 255.0; // values need to be de-normalized from [0,1] to [0,255] ${s} highp float Sign = 1.0 - step(128.0,rgba[0])*2.0; highp float Exponent = 2.0 * mod(rgba[0],128.0) + step(128.0,rgba[1]) - 127.0; highp float Mantissa = mod(rgba[1],128.0)*65536.0 + rgba[2]*256.0 +rgba[3] + float(0x800000); highp float Result = Sign * exp2(Exponent) * (Mantissa * exp2(-23.0 )); return Result; } `) } } static isLittleEndian() { const s = new ArrayBuffer(4), h = new Uint32Array(s), p = new Uint8Array(s); if (h[0] = 3735928559, p[0] === 239) return !0; if (p[0] === 222) return !1; throw new Error("unknown endianness") } } n.EncodingGlslLib = c }, 9894: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.FragColorGlslLib = void 0; const u = a(8520), c = a(5060); class f extends u.GlslLib { constructor(h) { super(h) } getFunctions() { return Object.assign(Object.assign({}, this.setFragColor()), this.getColorAsFloat()) } getCustomTypes() { return {} } setFragColor() { const h = (0, c.getGlsl)(this.context.glContext.version); return { setFragColor: new u.GlslLibRoutine(` void setFragColor(float value) { ${h.output} = encode(value); } `, ["encoding.encode"]) } } getColorAsFloat() { return { getColorAsFloat: new u.GlslLibRoutine(` float getColorAsFloat(vec4 color) { return decode(color); } `, ["encoding.decode"]) } } } n.FragColorGlslLib = f }, 2848: (b, n) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.replaceInlines = void 0; const a = /@inline[\s\n\r]+(\w+)[\s\n\r]+([0-9a-zA-Z_]+)\s*\(([^)]*)\)\s*{(([^}]|[\n\r])*)}/gm; n.replaceInlines = function(u) { const c = {}; let f; for (; (f = a.exec(u)) !== null;) { const s = f[3].split(",").map(h => { const p = h.trim().split(" "); 
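/* Module 7341 (EncodingGlslLib): encodeFloat32/decodeFloat32 pass a float through the red
   channel of a float texture, while encodeUint8/decodeUint8 pack an IEEE-754 float into
   four bytes (sign/exponent/mantissa) for byte textures; isLittleEndian() probes by writing
   0xDEADBEEF and checking the first byte. Module 9894 wraps these as setFragColor and
   getColorAsFloat, and module 2848 (replaceInlines) expands @inline-annotated GLSL
   functions at their call sites via regexes. */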
return p && p.length === 2 ? { type: p[0], name: p[1] } : null }).filter(h => h !== null); c[f[2]] = { params: s, body: f[4] } } for (const s in c) { const h = "(\\w+)?\\s+([_0-9a-zA-Z]+)\\s+=\\s+__FUNC__\\((.*)\\)\\s*;".replace("__FUNC__", s), p = new RegExp(h, "gm"); for (; (f = p.exec(u)) !== null;) { const l = f[1], o = f[2], t = f[3].split(","), e = l ? `${l} ${o};` : ""; let r = c[s].body, i = ""; c[s].params.forEach((g, m) => { g && (i += `${g.type} ${g.name} = ${t[m]}; `) }), r = `${i} ${r}`, r = r.replace("return", `${o} = `); const d = ` ${e} { ${r} } `; u = u.replace(f[0], d) } } return u.replace(a, "") } }, 8879: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.GlslPreprocessor = void 0; const u = a(8520), c = a(2848), f = a(5483), s = a(5060); n.GlslPreprocessor = class { constructor(h, p, l, o) { this.libs = {}, this.glslLibRoutineDependencyGraph = {}, this.context = new u.GlslContext(h, p, l, o), Object.keys(f.glslRegistry).forEach(e => { const r = new f.glslRegistry[e](this.context); this.libs[e] = r }); const t = this.glslLibRoutineDependencyGraph; for (const e in this.libs) { const r = this.libs[e].getFunctions(); for (const i in r) { const d = e + "." + i; let g; t[d] ? (g = t[d], g.routineBody = r[i].routineBody) : (g = new u.GlslLibRoutineNode(d, r[i].routineBody), t[d] = g); const m = r[i].dependencies; if (m) for (let _ = 0; _ < m.length; ++_) if (t[m[_]]) g.addDependency(t[m[_]]); else { const y = new u.GlslLibRoutineNode(m[_]); t[m[_]] = y, g.addDependency(y) } } } } preprocess() { const h = this.context.programInfo; let p = h.shaderSource; return this.context.programInfo.hasMain || (p = `${p} ${(0,s.getDefaultFragShaderMain)(this.context.glContext.version,this.context.outputTextureLayout.shape.length)}`), p = (0, c.replaceInlines)(p), `${(0,s.getFragShaderPreamble)(this.context.glContext.version)} ${this.getUniforms(h.inputNames,h.variables)} ${this.getImports(p)} ${p}` } getImports(h) { const p = this.selectGlslLibRoutinesToBeIncluded(h); if (p.length === 0) return ""; let l = ""; for (let o = 0; o < p.length; ++o) { if (!p[o].routineBody) throw new Error(`Missing body for the Glsl Library routine: ${p[o].name}`); l += p[o].routineBody + ` ` } return l } selectGlslLibRoutinesToBeIncluded(h) { const p = []; return Object.keys(this.glslLibRoutineDependencyGraph).forEach(l => { const o = l.split(".")[1]; h.indexOf(o) !== -1 && p.push(this.glslLibRoutineDependencyGraph[l]) }), u.TopologicalSortGlslRoutines.returnOrderedNodes(p) } getUniforms(h, p) { const l = []; if (h) for (const o of h) l.push(`uniform sampler2D ${o};`); if (p) for (const o of p) l.push(`uniform ${o.type} ${o.name}${o.arrayLength?`[${o.arrayLength}]`:""};`); return l.join(` `) } } }, 5483: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.glslRegistry = void 0; const u = a(5107), c = a(7341), f = a(9894), s = a(2655), h = a(3891); n.glslRegistry = { encoding: c.EncodingGlslLib, fragcolor: f.FragColorGlslLib, vec: h.VecGlslLib, shapeUtils: s.ShapeUtilsGlslLib, coordinates: u.CoordsGlslLib } }, 2655: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.ShapeUtilsGlslLib = void 0; const u = a(8520); class c extends u.GlslLib { constructor(s) { super(s) } getFunctions() { return Object.assign(Object.assign(Object.assign(Object.assign(Object.assign({}, this.bcastIndex()), this.bcastMatmulIndex()), this.offsetToIndices()), this.indicesToOffset()), this.incrementIndices()) } getCustomTypes() { return {} } bcastIndex() { const s = 
this.context.outputTextureLayout.shape.length, h = {}; return this.context.programInfo.inputNames.forEach((p, l) => { const o = this.context.inputTextureLayouts[l].unpackedShape; if (o.length <= s) { const t = o.length, e = s - t, r = `bcastIndices_${p}`; let i = ""; for (let g = 0; g < t; ++g) i += ` realIndices[${g}] = int( mod(float(bcastedIndices[${e+g}]), ${o[g]}.0) ); `; const d = ` void ${r} (int bcastedIndices[${s}], out int realIndices[${t}]) { ${i} } `; h[r] = new u.GlslLibRoutine(d) } }), h } bcastMatmulIndex() { const s = this.context.outputTextureLayout.shape.length, h = {}; return this.context.programInfo.inputNames.forEach((p, l) => { const o = this.context.inputTextureLayouts[l].shape; if (!(o.length < 2 || o.length > s)) { const t = o.length, e = s - t, r = `bcastMatmulIndices_${p}`; let i = ""; for (let g = 0; g < t - 2; ++g) i += ` realIndices[${g}] = int( mod(float(bcastedIndices[${e+g}]), ${o[g]}.0) ); `; const d = ` void ${r}(int bcastedIndices[${s}], out int realIndices[${t}]) { ${i} realIndices[${t-1}] = bcastedIndices[${s-1}]; realIndices[${t-2}] = bcastedIndices[${s-2}]; } `; h[r] = new u.GlslLibRoutine(d) } }), h } indicesToOffset() { const s = {}; return this.context.programInfo.inputNames.forEach((h, p) => { const l = this.context.inputTextureLayouts[p].shape, o = this.context.inputTextureLayouts[p].strides, t = l.length; let e = `indicesToOffset_${h}`; s[e] = new u.GlslLibRoutine(c.indexToOffsetSingle(e, t, o)), e = `indicesToOffset_${h}_T`, s[e] = new u.GlslLibRoutine(c.indexToOffsetSingle(e, t, o.slice().reverse())) }), s } static indexToOffsetSingle(s, h, p) { let l = ""; for (let o = h - 1; o >= 0; --o) l += ` offset += indices[${o}] * ${p[o]}; `; return ` int ${s}(int indices[${h}]) { int offset = 0; ${l} return offset; } ` } offsetToIndices() { const s = {}; return this.context.programInfo.inputNames.forEach((h, p) => { const l = this.context.inputTextureLayouts[p].shape, o = this.context.inputTextureLayouts[p].strides, t = l.length; let e = `offsetToIndices_${h}`; s[e] = new u.GlslLibRoutine(c.offsetToIndicesSingle(e, t, o)), e = `offsetToIndices_${h}_T`, s[e] = new u.GlslLibRoutine(c.offsetToIndicesSingle(e, t, o.slice().reverse())) }), s } static offsetToIndicesSingle(s, h, p) { const l = []; for (let o = 0; o < h - 1; ++o) l.push(` indices[${o}] = offset / ${p[o]};`), l.push(` offset -= indices[${o}] * ${p[o]};`); return l.push(` indices[${h-1}] = offset;`), ` void ${s}(int offset, out int indices[${h}]) { ${l.join("")} } ` } incrementIndices() { const s = {}; return this.context.programInfo.inputNames.forEach((h, p) => { const l = this.context.inputTextureLayouts[p].shape, o = l.length, t = `incrementIndices_${h}`; let e = ""; for (let i = 0; i < o; ++i) e += ` shape[${i}] = ${l[i]};`; const r = ` void ${t}(int axis, out int indices[${o}]) { int shape[${o}]; ${e}; for(int i = ${o} -1 ; i >= 0; --i) { if(i > axis) continue; indices[i] += 1; if(indices[i] < shape[i]) { break; } indices[i] = 0; } } `; s[t] = new u.GlslLibRoutine(r) }), s } } n.ShapeUtilsGlslLib = c }, 5060: (b, n) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.getDefaultFragShaderMain = n.getFragShaderPreamble = n.getVertexShaderSource = n.getGlsl = void 0; const a = { version: "", attribute: "attribute", varyingVertex: "varying", varyingFrag: "varying", texture2D: "texture2D", output: "gl_FragColor", outputDeclaration: "" }, u = { version: "#version 300 es", attribute: "in", varyingVertex: "out", varyingFrag: "in", texture2D: "texture", output: "outputColor", 
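// GLSL ES 3.00 (WebGL2) token set; the object `a` above holds the GLSL ES 1.00 (WebGL1)
// equivalents. getGlsl(version) below returns `a` for version 1 and this object otherwise.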
outputDeclaration: "out vec4 outputColor;" }; function c(f) { return f === 1 ? a : u } n.getGlsl = c, n.getVertexShaderSource = function(f) { const s = c(f); return `${s.version} precision highp float; ${s.attribute} vec3 position; ${s.attribute} vec2 textureCoord; ${s.varyingVertex} vec2 TexCoords; void main() { gl_Position = vec4(position, 1.0); TexCoords = textureCoord; }` }, n.getFragShaderPreamble = function(f) { const s = c(f); return `${s.version} precision highp float; precision highp int; precision highp sampler2D; ${s.varyingFrag} vec2 TexCoords; ${s.outputDeclaration} const vec2 halfCR = vec2(0.5, 0.5); // Custom vector types to handle higher dimenalities. struct ivec5 { int x; int y; int z; int w; int u; }; struct ivec6 { int x; int y; int z; int w; int u; int v; }; int imod(int x, int y) { return x - y * (x / y); } ` }, n.getDefaultFragShaderMain = function(f, s) { return ` void main() { int indices[${s}]; toVec(TexCoords, indices); vec4 result = vec4(process(indices)); ${c(f).output} = result; } ` } }, 3891: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.VecGlslLib = void 0; const u = a(8520); class c extends u.GlslLib { constructor(s) { super(s) } getCustomTypes() { return {} } getFunctions() { return Object.assign(Object.assign(Object.assign(Object.assign({}, this.binaryVecFunctions()), this.copyVec()), this.setVecItem()), this.getVecItem()) } binaryVecFunctions() { const s = this.context.outputTextureLayout.shape.length, h = { add: "+=", sub: "-=", mul: "*=", div: "/=" }, p = {}; for (const l in h) { const o = `${l}Vec`; let t = ""; for (let r = 0; r < s; ++r) t += ` dest[${r}] ${h[l]} src[${r}]; `; const e = ` void ${o}(int src[${s}], out int dest[${s}]) { ${t} } `; p[o] = new u.GlslLibRoutine(e) } return p } copyVec() { const s = this.context.outputTextureLayout.shape.length; let h = ""; for (let l = 0; l < s; ++l) h += ` dest[${l}] = src[${l}]; `; const p = ` void copyVec(int src[${s}], out int dest[${s}]) { ${h} } `; return { copyVec: new u.GlslLibRoutine(p) } } setVecItem() { const s = this.context.outputTextureLayout.shape.length; let h = ` if(index < 0) index =${s} + index; if (index == 0) m[0] = value; `; for (let l = 1; l < s - 1; ++l) h += ` else if (index == ${l}) m[${l}] = value; `; h += ` else m[${s-1}] = value; `; const p = ` void setVecItem(out int m[${s}], int index, int value) { ${h} } `; return { setVecItem: new u.GlslLibRoutine(p) } } getVecItem() { const s = this.context.outputTextureLayout.shape.length; let h = ` if(index < 0) index = ${s} + index; if (index == 0) return m[0]; `; for (let l = 1; l < s - 1; ++l) h += ` else if (index == ${l}) return m[${l}]; `; h += ` else return m[${s-1}]; `; const p = ` int getVecItem(int m[${s}], int index) { ${h} } `; return { getVecItem: new u.GlslLibRoutine(p) } } } n.VecGlslLib = c }, 8316: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.WebGLInferenceHandler = void 0; const u = a(6231), c = a(9162), f = a(2517), s = a(2403), h = a(7019), p = a(8710), l = a(5611), o = a(4057), t = a(2039); n.WebGLInferenceHandler = class { constructor(e) { this.session = e, this.packedTextureDataCache = new Map, this.unpackedTextureDataCache = new Map } calculateTextureWidthAndHeight(e, r) { return (0, o.calculateTextureWidthAndHeight)(this.session.layoutStrategy, e, r) } executeProgram(e, r) { if (r.length < e.inputNames.length) throw new Error(`Input size mustn't be less than ${e.inputNames.length}.`); if (e.inputNames.length !== e.inputTypes.length) throw new Error("input names 
size does not match input types"); const i = []; for (let T = 0; T < e.inputNames.length; ++T) i[T] = this.getOrCreateTextureData(r[T], e.inputTypes[T]); const d = ((T, w) => { const S = w.map(E => `${E.unpackedShape.join(",")};${E.width}x${E.height}`).join("_"); let O = T.name; return T.cacheHint && (O += "[" + T.cacheHint + "]"), O += ":" + S, O })(e, i); let g = this.session.programManager.getArtifact(d); const m = g ? g.programInfo : typeof e.get == "function" ? e.get() : e, _ = (0, o.createTextureLayoutFromTextureType)(this.session.layoutStrategy, m.output.dims, m.output.textureType), y = this.createTextureData(_, m.output.type); return g || (g = this.session.programManager.build(m, i, y), this.session.programManager.setArtifact(d, g)), this.runProgram(g, i, y), y } run(e, r) { return this.executeProgram(e, r).tensor } runProgram(e, r, i) { for (let d = 0; d < r.length; ++d) if (!!r[d].isPacked != (e.programInfo.inputTypes[d] === t.TextureType.packed)) throw new Error(`input[${d}] property packed inconsistent`); if (!!i.isPacked != (e.programInfo.output.textureType === t.TextureType.packed)) throw new Error("output property packed inconsistent"); this.session.programManager.run(e, r, i) } getOrCreateTextureData(e, r) { let i = this.getTextureData(e.dataId, r === t.TextureType.packed); if (!i && (i = this.getTextureData(e.dataId, r !== t.TextureType.packed), i)) return r === t.TextureType.packed ? this.pack(i) : this.unpack(i); if (!i) { const d = (0, o.createTextureLayoutFromTextureType)(this.session.layoutStrategy, e.dims, r); if (r === t.TextureType.packedLastDimension) { const _ = e.dims; if (_.length === 4) { const y = [_[0], Math.ceil(_[1] * _[2] * _[3] / 4)], T = (0, o.createTextureLayoutFromTextureType)(this.session.layoutStrategy, y, r); let w = e.numberData; if (_[1] * _[2] * _[3] % 4 != 0) { const S = _[0], O = _[1] * _[2] * _[3], E = Math.ceil(O * 1 / 4) * 4; w = new Float32Array(S * E); for (let v = 0; v < S; ++v) { const P = v * O, L = v * E + v % 1 * O; w.set(e.numberData.subarray(P, P + O), L) } } return this.createTextureData(T, e.type, w, e, 1) } } if (r === t.TextureType.packed) { const g = (0, o.createTextureLayoutFromShape)(this.session.layoutStrategy, e.dims, 1, [], { reverseWH: !0 }), m = this.createTextureData(g, e.type, e.numberData, e, 1); i = this.pack(m) } else i = this.createTextureData(d, e.type, e.numberData, e, 1) } return i } createTextureDataFromLayoutBindTensor(e, r, i, d) { return this.createTextureData(e, r, i, d, 1) } createTextureData(e, r, i, d, g) { u.Logger.verbose("InferenceHandler", `Creating TextureData: layout:[${JSON.stringify(e)}]`); const m = this.session.textureManager.createTextureFromLayout(r, e, i, g); return this.createTextureDataFromTexture(e, r, m, d) } reshapeUnpacked(e, r) { const i = this.getOrCreateTextureData(e, t.TextureType.unpacked), d = { channels: i.channels, height: i.height, width: i.width, shape: r.length !== 0 ? r : [1], strides: f.ShapeUtil.computeStrides(r), unpackedShape: r }; return this.createTextureDataFromTexture(d, e.type, i.texture).tensor } reshapePacked(e, r) { const i = this.getOrCreateTextureData(e, t.TextureType.packed); if ((0, h.isReshapeCheap)(e.dims, r)) { const y = { channels: i.channels, height: i.height, width: i.width, shape: r.length !== 0 ? 
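// Cheap packed reshape: when isReshapeCheap() holds, the existing packed texture is simply rebound
// with the new shape/strides; otherwise (below) the data is routed through 3D packed-reshape programs.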
r : [1], strides: f.ShapeUtil.computeStrides(r), unpackedShape: r, isPacked: !0 }; return this.createTextureDataFromTexture(y, e.type, i.texture).tensor } const d = (0, h.processDims3D)(e.dims), g = (0, h.processDims3D)(r), m = this.reshapePacked(e, d), _ = this.run((0, h.createPackedReshape3DProgramInfoLoader)(this, m, g), [m]); return this.reshapePacked(_, r) } cast(e, r) { const i = this.getOrCreateTextureData(e, t.TextureType.unpacked); return this.createTextureDataFromTexture(i, r, i.texture).tensor } createTextureDataFromTexture(e, r, i, d, g) { const m = Object.assign(Object.assign({}, e), { tensor: d || new c.Tensor(e.unpackedShape, r, _ => this.readTexture(m), async _ => this.readTextureAsync(m), void 0, g), texture: i }); return this.setTextureData(m.tensor.dataId, m, e.isPacked), m } getTextureData(e, r = !1) { return this.session.isInitializer(e) ? this.session.getTextureData(e, r) : r ? this.packedTextureDataCache.get(e) : this.unpackedTextureDataCache.get(e) } setTextureData(e, r, i = !1) { this.session.isInitializer(e) ? this.session.setTextureData(e, r, i) : (i ? this.packedTextureDataCache : this.unpackedTextureDataCache).set(e, r) } isTextureLayoutCached(e, r = !1) { return !!this.getTextureData(e.dataId, r) } dispose() { this.session.textureManager.clearActiveTextures(), this.packedTextureDataCache.forEach(e => this.session.textureManager.releaseTexture(e)), this.packedTextureDataCache = new Map, this.unpackedTextureDataCache.forEach(e => this.session.textureManager.releaseTexture(e)), this.unpackedTextureDataCache = new Map } readTexture(e) { return e.isPacked ? this.readTexture(this.unpack(e)) : this.session.backend.glContext.isFloat32DownloadSupported ? this.session.textureManager.readTexture(e, e.tensor.type, e.channels) : this.session.textureManager.readUint8TextureAsFloat((0, p.encodeAsUint8)(this, e)) } async readTextureAsync(e) { return e.isPacked ? this.readTextureAsync(this.unpack(e)) : this.session.backend.glContext.isFloat32DownloadSupported ? this.session.textureManager.readTextureAsync(e, e.tensor.type, e.channels) : this.session.textureManager.readUint8TextureAsFloat((0, p.encodeAsUint8)(this, e)) } pack(e) { return this.executeProgram((0, s.createPackProgramInfoLoader)(this, e.tensor), [e.tensor]) } unpack(e) { return this.executeProgram((0, l.createUnpackProgramInfoLoader)(this, e.tensor), [e.tensor]) } } }, 1640: function(b, n, a) { var u = this && this.__createBinding || (Object.create ? function(z, Z, J, ue) { ue === void 0 && (ue = J); var Se = Object.getOwnPropertyDescriptor(Z, J); Se && !("get" in Se ? !Z.__esModule : Se.writable || Se.configurable) || (Se = { enumerable: !0, get: function() { return Z[J] } }), Object.defineProperty(z, ue, Se) } : function(z, Z, J, ue) { ue === void 0 && (ue = J), z[ue] = Z[J] }), c = this && this.__setModuleDefault || (Object.create ? 
function(z, Z) { Object.defineProperty(z, "default", { enumerable: !0, value: Z }) } : function(z, Z) { z.default = Z }), f = this && this.__importStar || function(z) { if (z && z.__esModule) return z; var Z = {}; if (z != null) for (var J in z) J !== "default" && Object.prototype.hasOwnProperty.call(z, J) && u(Z, z, J); return c(Z, z), Z }; Object.defineProperty(n, "__esModule", { value: !0 }), n.WEBGL_OP_RESOLVE_RULES = void 0; const s = a(2898), h = f(a(7839)), p = a(4196), l = a(2069), o = a(8138), t = a(9663), e = a(5193), r = a(7992), i = a(1253), d = a(4776), g = a(6572), m = a(3346), _ = a(5623), y = a(2870), T = a(2143), w = a(4939), S = a(718), O = a(2268), E = a(8117), v = a(2278), P = a(5524), L = a(5975), V = a(3933), R = a(6558), k = a(5723), Y = a(3738), C = f(a(4909)), $ = a(8428), X = a(9793); n.WEBGL_OP_RESOLVE_RULES = [ ["Abs", "", "6+", C.abs], ["Acos", "", "7+", C.acos], ["Add", "", "7+", h.add], ["And", "", "7+", h.and], ["Asin", "", "7+", C.asin], ["Atan", "", "7+", C.atan], ["AveragePool", "", "7+", T.averagePool, T.parseAveragePoolAttributes], ["BatchNormalization", "", "7+", s.batchNormalization, s.parseBatchNormalizationAttributes], ["Cast", "", "6+", p.cast, p.parseCastAttributes], ["Ceil", "", "6+", C.ceil], ["Clip", "", "6-10", C.clip, C.parseClipAttributes], ["Clip", "", "11+", C.clipV11], ["Concat", "", "4+", l.concat, l.parseConcatAttributes], ["Conv", "", "1+", o.conv, o.parseConvAttributes], ["ConvTranspose", "", "1+", t.convTranspose, t.parseConvTransposeAttributes], ["Cos", "", "7+", C.cos], ["Div", "", "7+", h.div], ["Dropout", "", "7+", C.identity], ["DepthToSpace", "", "1+", e.depthToSpace, e.parseDepthToSpaceAttributes], ["Equal", "", "7+", h.equal], ["Elu", "", "6+", C.elu, C.parseEluAttributes], ["Exp", "", "6+", C.exp], ["Flatten", "", "1+", r.flatten, r.parseFlattenAttributes], ["Floor", "", "6+", C.floor], ["FusedConv", "com.microsoft", "1+", o.conv, o.parseConvAttributes], ["Gather", "", "1+", i.gather, i.parseGatherAttributes], ["Gemm", "", "7-10", d.gemm, d.parseGemmAttributesV7], ["Gemm", "", "11+", d.gemm, d.parseGemmAttributesV11], ["GlobalAveragePool", "", "1+", T.globalAveragePool, T.parseGlobalAveragePoolAttributes], ["GlobalMaxPool", "", "1+", T.globalMaxPool], ["Greater", "", "7+", h.greater], ["Identity", "", "1+", C.identity], ["ImageScaler", "", "1+", g.imageScaler, g.parseImageScalerAttributes], ["InstanceNormalization", "", "6+", m.instanceNormalization, m.parseInstanceNormalizationAttributes], ["LeakyRelu", "", "6+", C.leakyRelu, C.parseLeakyReluAttributes], ["Less", "", "7+", h.less], ["Log", "", "6+", C.log], ["MatMul", "", "1+", _.matMul, _.parseMatMulAttributes], ["MaxPool", "", "1+", T.maxPool, T.parseMaxPoolAttributes], ["Mul", "", "7+", h.mul], ["Neg", "", "6+", C.neg], ["Not", "", "1+", C.not], ["Or", "", "7+", h.or], ["Pad", "", "2-10", y.padV2, y.parsePadAttributesV2], ["Pad", "", "11+", y.padV11, y.parsePadAttributesV11], ["Pow", "", "7+", h.pow], ["PRelu", "", "7+", h.pRelu], ["ReduceLogSum", "", "1+", w.reduceLogSum, w.parseReduceAttributes], ["ReduceMax", "", "1+", w.reduceMax, w.parseReduceAttributes], ["ReduceMean", "", "1+", w.reduceMean, w.parseReduceAttributes], ["ReduceMin", "", "1+", w.reduceMin, w.parseReduceAttributes], ["ReduceProd", "", "1+", w.reduceProd, w.parseReduceAttributes], ["ReduceSum", "", "1-12", w.reduceSum, w.parseReduceAttributes], ["ReduceSumSquare", "", "1+", w.reduceLogSumSquare, w.parseReduceAttributes], ["Relu", "", "6+", C.relu], ["Reshape", "", "5+", S.reshape], ["Resize", "", "10", 
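// Each rule reads as [opType, opsetDomain, supportedOpsetVersions, opImplementation, optionalAttributeParser];
// an empty domain string means the default ONNX domain.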
O.resize, O.parseResizeAttributesV10], ["Resize", "", "11+", O.resize, O.parseResizeAttributesV11], ["Shape", "", "1+", E.shape], ["Sigmoid", "", "6+", C.sigmoid], ["Sin", "", "7+", C.sin], ["Slice", "", "10+", v.sliceV10], ["Slice", "", "1-9", v.slice, v.parseSliceAttributes], ["Softmax", "", "1-12", P.softmax, P.parseSoftmaxAttributes], ["Softmax", "", "13+", P.softmaxV13, P.parseSoftmaxAttributesV13], ["Split", "", "2-12", L.split, L.parseSplitAttributes], ["Sqrt", "", "6+", C.sqrt], ["Squeeze", "", "1-12", V.squeeze, V.parseSqueezeAttributes], ["Squeeze", "", "13+", V.squeezeV13], ["Sub", "", "7+", h.sub], ["Sum", "", "6+", R.sum], ["Tan", "", "7+", C.tan], ["Tanh", "", "6+", C.tanh], ["Tile", "", "6+", k.tile], ["Transpose", "", "1+", Y.transpose, Y.parseTransposeAttributes], ["Upsample", "", "7-8", X.upsample, X.parseUpsampleAttributesV7], ["Upsample", "", "9", X.upsample, X.parseUpsampleAttributesV9], ["Unsqueeze", "", "1-12", $.unsqueeze, $.parseUnsqueezeAttributes], ["Unsqueeze", "", "13+", $.unsqueezeV13], ["Xor", "", "7+", h.xor] ] }, 2898: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseBatchNormalizationAttributes = n.batchNormalization = void 0; const u = a(246), c = a(5060), f = a(2039), s = { name: "BatchNormalization", inputNames: ["A", "Scale", "B", "Mean", "Variance"], inputTypes: [f.TextureType.unpacked, f.TextureType.unpacked, f.TextureType.unpacked, f.TextureType.unpacked, f.TextureType.unpacked] }; n.batchNormalization = (l, o, t) => (p(o), [l.run(Object.assign(Object.assign({}, s), { cacheHint: t.cacheKey, get: () => h(l, o, t) }), o)]), n.parseBatchNormalizationAttributes = l => { const o = l.attributes.getFloat("epsilon", 1e-5), t = l.attributes.getFloat("momentum", .9), e = l.attributes.getInt("spatial", 1); return (0, u.createAttributeWithCacheKey)({ epsilon: o, momentum: t, spatial: e }) }; const h = (l, o, t) => { const e = (0, c.getGlsl)(l.session.backend.glContext.version), r = o[0].dims.length, [i, d] = l.calculateTextureWidthAndHeight(o[1].dims, f.TextureType.unpacked), g = ` float process(int[${r}] indices) { vec2 position = offsetToCoords(indices[1], ${i}, ${d}); float scale = getColorAsFloat(${e.texture2D}(Scale, position)); float mean = getColorAsFloat(${e.texture2D}(Mean, position)); float variance = getColorAsFloat(${e.texture2D}(Variance, position)); float b = getColorAsFloat(${e.texture2D}(B, position)); return scale * ( (_A(indices) - mean) / sqrt(variance + float(${t.epsilon})) ) + b; }`; return Object.assign(Object.assign({}, s), { output: { dims: o[0].dims, type: o[0].type, textureType: f.TextureType.unpacked }, shaderSource: g }) }, p = l => { if (!l || l.length !== 5) throw new Error("BatchNormalization requires 5 inputs."); const o = l[0], t = l[1], e = l[2], r = l[3], i = l[4]; if (o.dims.length < 3 || t.dims.length !== 1 || e.dims.length !== 1 || r.dims.length !== 1 || i.dims.length !== 1) throw new Error("invalid input shape."); if (t.dims[0] !== o.dims[1] || e.dims[0] !== o.dims[1] || r.dims[0] !== o.dims[1] || i.dims[0] !== o.dims[1]) throw new Error("invalid input shape."); if (o.type !== "float32" && o.type !== "float64" || t.type !== "float32" && t.type !== "float64" || e.type !== "float32" && e.type !== "float64" || r.type !== "float32" && r.type !== "float64" || i.type !== "float32" && i.type !== "float64") throw new Error("invalid input tensor types.") } }, 7839: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.xor = n.sub = n.pRelu = n.pow = n.or = n.mul = n.less = n.greater = 
n.equal = n.div = n.and = n.add = n.glslPRelu = n.glslPow = n.glslXor = n.glslOr = n.glslAnd = n.glslLess = n.glslGreater = n.glslEqual = n.glslSub = n.glslMul = n.glslDiv = n.glslAdd = void 0; const u = a(2517), c = a(8520), f = a(5060), s = a(2039); function h() { const w = "add_"; return { body: ` float ${w}(float a, float b) { return a + b; } vec4 ${w}(vec4 v1, vec4 v2) { return v1 + v2; } `, name: w, type: c.FunctionType.ValueBased } } function p() { const w = "div_"; return { body: ` float ${w}(float a, float b) { return a / b; } vec4 ${w}(vec4 v1, vec4 v2) { return v1 / v2; } `, name: w, type: c.FunctionType.ValueBased } } function l() { const w = "mul_"; return { body: ` float ${w}(float a, float b) { return a * b; } vec4 ${w}(vec4 v1, vec4 v2) { return v1 * v2; } `, name: w, type: c.FunctionType.ValueBased } } function o() { const w = "sub_"; return { body: ` float ${w}(float a, float b) { return a - b; } vec4 ${w}(vec4 v1, vec4 v2) { return v1 - v2; } `, name: w, type: c.FunctionType.ValueBased } } function t() { const w = "equal_"; return { body: ` float ${w}(float a, float b) { return float(a == b); } vec4 ${w}(vec4 v1, vec4 v2) { return vec4(equal(v1, v2)); } `, name: w, type: c.FunctionType.ValueBased } } function e() { const w = "greater_"; return { body: ` float ${w}(float a, float b) { return float(a > b); } vec4 ${w}(vec4 v1, vec4 v2) { return vec4( v1.r > v2.r , v1.g > v2.g, v1.b > v2.b, v1.a > v2.a ); } `, name: w, type: c.FunctionType.ValueBased } } function r() { const w = "less_"; return { body: ` float ${w}(float a, float b) { return float(a < b); } vec4 ${w}(vec4 v1, vec4 v2) { return vec4( v1.r < v2.r , v1.g < v2.g, v1.b < v2.b, v1.a < v2.a ); } `, name: w, type: c.FunctionType.ValueBased } } function i() { const w = "and_"; return { body: ` float ${w}(float a, float b) { return float( bool(a) && bool(b) ); } vec4 ${w}(vec4 v1, vec4 v2) { bvec4 b1 = bvec4(v1); bvec4 b2 = bvec4(v2); return vec4( b1.r && b2.r , b1.g && b2.g, b1.b && b2.b, b1.a && b2.a ); } `, name: w, type: c.FunctionType.ValueBased } } function d() { const w = "or_"; return { body: ` float ${w}(float a, float b) { return float( bool(a) || bool(b) ); } vec4 ${w}(vec4 v1, vec4 v2) { bvec4 b1 = bvec4(v1); bvec4 b2 = bvec4(v2); return vec4( b1.r || b2.r , b1.g || b2.g, b1.b || b2.b, b1.a || b2.a ); } `, name: w, type: c.FunctionType.ValueBased } } function g() { const w = "xor_"; return { body: ` float ${w}(float a, float b) { return float( bool(a) ^^ bool(b) ); } vec4 ${w}(vec4 v1, vec4 v2) { bvec4 b1 = bvec4(v1); bvec4 b2 = bvec4(v2); return vec4( b1.r ^^ b2.r , b1.g ^^ b2.g, b1.b ^^ b2.b, b1.a ^^ b2.a ); } `, name: w, type: c.FunctionType.ValueBased } } function m() { return function(w) { const S = `${w}_`; return { body: ` float ${S}(float a, float b) { return ${w}(a, b); } vec4 ${S}(vec4 v1, vec4 v2) { return ${w}(v1, v2); } `, name: S, type: c.FunctionType.ValueBased } }("pow") } function _() { const w = "prelu_"; return { body: ` float ${w}(float a, float b) { return a < 0.0 ? a * b: a; } vec4 ${w}(vec4 v1, vec4 v2) { return vec4( v1.r < 0.0 ? v1.r * v2.r: v1.r, v1.g < 0.0 ? v1.g * v2.g: v1.g, v1.b < 0.0 ? v1.b * v2.b: v1.b, v1.a < 0.0 ? v1.a * v2.a: v1.a ); } `, name: w, type: c.FunctionType.ValueBased } } n.glslAdd = h, n.glslDiv = p, n.glslMul = l, n.glslSub = o, n.glslEqual = t, n.glslGreater = e, n.glslLess = r, n.glslAnd = i, n.glslOr = d, n.glslXor = g, n.glslPow = m, n.glslPRelu = _; const y = (w, S, O, E = S[0].type, v) => { const P = w.session.pack ? 
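// Element-wise binary op loader: the texture type follows session.pack. The program built in `T`
// below emits a broadcast shader (bcastIndices_A/_B map output indices back to each input) when the
// two input shapes differ, and a trivial per-texel shader when they match.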
s.TextureType.packed : s.TextureType.unpacked; return { name: O.name, inputNames: ["A", "B"], inputTypes: [P, P], cacheHint: v, get: () => T(w, S, O, E) } }, T = (w, S, O, E = S[0].type) => { const v = w.session.pack ? s.TextureType.packed : s.TextureType.unpacked, P = !u.ShapeUtil.areEqual(S[0].dims, S[1].dims); let L = S[0].dims; const V = w.session.pack; if (P) { const Y = u.BroadcastUtil.calcShape(S[0].dims, S[1].dims, !1); if (!Y) throw new Error("Can't perform binary op on the given tensors"); L = Y; const C = L.length, $ = S[0].dims.length !== 0 ? S[0].dims.length : 1, X = S[1].dims.length !== 0 ? S[1].dims.length : 1, z = S[0].dims.length !== 0 ? "bcastIndices_A(indices, aindices);" : "aindices[0] = 0;", Z = S[1].dims.length !== 0 ? "bcastIndices_B(indices, bindices);" : "bindices[0] = 0;", J = (0, f.getGlsl)(w.session.backend.glContext.version), ue = V ? ` ${O.body} void main() { vec4 a = getAAtOutCoords(); vec4 b = getBAtOutCoords(); vec4 result = ${O.name}(a, b); ${J.output} = result; }` : ` ${O.body} float process(int indices[${C}]) { int aindices[${$}]; int bindices[${X}]; ${z} ${Z} return ${O.name}(_A(aindices), _B(bindices)); }`; return { name: O.name, inputNames: ["A", "B"], inputTypes: [v, v], output: { dims: L, type: E, textureType: v }, shaderSource: ue, hasMain: V } } const R = (0, f.getGlsl)(w.session.backend.glContext.version), k = ` ${O.body} void main() { vec4 v1 = ${R.texture2D}(A, TexCoords); vec4 v2 = ${R.texture2D}(B, TexCoords); vec4 result = ${O.name}(v1, v2); ${R.output} = result; } `; return { name: O.name, inputNames: ["A", "B"], inputTypes: [v, v], output: { dims: S[0].dims, type: E, textureType: v }, shaderSource: k, hasMain: !0 } }; n.add = (w, S) => [w.run(y(w, S, h()), S)], n.and = (w, S) => [w.run(y(w, S, i(), "bool"), S)], n.div = (w, S) => [w.run(y(w, S, p()), S)], n.equal = (w, S) => [w.run(y(w, S, t(), "bool"), S)], n.greater = (w, S) => [w.run(y(w, S, e(), "bool"), S)], n.less = (w, S) => [w.run(y(w, S, r(), "bool"), S)], n.mul = (w, S) => [w.run(y(w, S, l()), S)], n.or = (w, S) => [w.run(y(w, S, d(), "bool"), S)], n.pow = (w, S) => [w.run(y(w, S, m()), S)], n.pRelu = (w, S) => [w.run(y(w, S, _()), S)], n.sub = (w, S) => [w.run(y(w, S, o()), S)], n.xor = (w, S) => [w.run(y(w, S, g(), "bool"), S)] }, 4196: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseCastAttributes = n.cast = void 0; const u = a(2517); n.cast = (f, s, h) => (c(s), [f.cast(s[0], h)]), n.parseCastAttributes = f => u.ProtoUtil.tensorDataTypeFromProto(f.attributes.getInt("to")); const c = f => { if (!f || f.length !== 1) throw new Error("Cast requires 1 input."); if (f[0].type === "string") throw new Error("Invalid input type.") } }, 1163: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createPackedConcatProgramInfoLoader = void 0; const u = a(5060), c = a(2039), f = a(9390), s = a(2827); n.createPackedConcatProgramInfoLoader = (p, l, o) => { const t = (e = l.length, r = o.cacheKey, { name: "Concat (packed)", inputNames: Array.from({ length: e }, (i, d) => `X${d}`), inputTypes: Array(e).fill(c.TextureType.packed), cacheHint: r }); var e, r; return Object.assign(Object.assign({}, t), { get: () => ((i, d, g, m) => { const _ = g[0].dims.slice(); if (m >= _.length || m < -1 * _.length) throw new Error("axis specified for concat doesn't match input dimensionality"); m < 0 && (m = _.length + m); const y = _.slice(0); for (let z = 1; z < g.length; z++) { const Z = g[z].dims.slice(); for (let J = 0; J < _.length; J++) if (J === m) y[m] 
+= Z[J]; else if (_[J] !== Z[J]) throw new Error("non concat dimensions must match") } const T = y.length, w = (0, s.getChannels)("coords", T), S = (0, f.getCoordsDataType)(T), O = (0, s.unpackFromChannel)(), E = g.map(z => z.dims), v = (0, f.getGlChannels)(T), P = new Array(E.length - 1); P[0] = E[0][m]; for (let z = 1; z < P.length; z++) P[z] = P[z - 1] + E[z][m]; const L = v[m], V = v.slice(-2), R = v.join(); let k = `if (${L} < ${P[0]}) { return getChannel( getX0(${R}), vec2(${V.join()})); }`; for (let z = 1; z < P.length; z++) { const Z = P[z - 1]; k += ` if (${L} < ${P[z]} && ${L} >= ${P[z-1]}) { return getChannel( getX${z}(${h(v,L,Z)}), vec2(${h(V,L,Z)})); }` } const Y = P.length, C = P[P.length - 1]; k += ` return getChannel( getX${Y}(${h(v,L,C)}), vec2(${h(V,L,C)}));`; const $ = (0, u.getGlsl)(i.session.backend.glContext.version), X = ` ${O} float getValue(${v.map(z=>"int "+z)}) { ${k} } void main() { ${S} coords = getOutputCoords(); int lastDim = coords.${v[T-1]}; coords.${v[T-1]} = coords.${v[T-2]}; coords.${v[T-2]} = lastDim; vec4 result = vec4(getValue(${w}), 0., 0., 0.); ${w[T-1]} = ${w[T-1]} + 1; if (${w[T-1]} < ${y[T-1]}) { result.g = getValue(${w}); } ${w[T-2]} = ${w[T-2]} + 1; if (${w[T-2]} < ${y[T-2]}) { result.a = getValue(${w}); } ${w[T-1]} = ${w[T-1]} - 1; if (${w[T-2]} < ${y[T-2]} && ${w[T-1]} < ${y[T-1]}) { result.b = getValue(${w}); } ${$.output} = result; } `; return Object.assign(Object.assign({}, d), { output: { dims: y, type: g[0].type, textureType: c.TextureType.packed }, shaderSource: X, hasMain: !0 }) })(p, t, l, o.axis) }) }; const h = (p, l, o) => { const t = p.indexOf(l); return p.map((e, r) => r === t ? `${e} - ${o}` : e).join() } }, 2069: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseConcatAttributes = n.concat = void 0; const u = a(246), c = a(2039), f = a(1163); n.concat = (e, r, i) => (t(r), e.session.pack && r[0].dims.length > 1 ? [e.run((0, f.createPackedConcatProgramInfoLoader)(e, r, i), r)] : [e.run(s(e, r, i), r)]); const s = (e, r, i) => { const d = (g = r.length, m = i.cacheKey, { name: "Concat", inputNames: Array.from({ length: g }, (_, y) => `X${y}`), inputTypes: Array(g).fill(c.TextureType.unpacked), cacheHint: m }); var g, m; return Object.assign(Object.assign({}, d), { get: () => ((_, y, T, w) => { const S = T[0].dims.slice(); if (w >= S.length || w < -1 * S.length) throw new Error("axis specified for concat doesn't match input dimensionality"); w < 0 && (w = S.length + w); const O = S.slice(0); for (let R = 1; R < T.length; R++) { const k = T[R].dims.slice(); for (let Y = 0; Y < S.length; Y++) if (Y === w) O[w] += k[Y]; else if (S[Y] !== k[Y]) throw new Error("non concat dimensions must match") } const E = O.length, v = new Array(T.length); let P = 0; for (let R = 0; R < v.length; ++R) P += T[R].dims[w], v[R] = P; let L = ""; L = T.length < 5 ? 
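// Unpacked Concat: `v` holds cumulative extents along the concat axis; the generated
// getTextureWhereDataResides() picks the contributing input, and the index is shifted back by the
// preceding inputs' extent before fetching.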
h(v) : p(v); const V = ` ${l(T.length,E)} ${o(v)} ${L} float process(int indices[${E}]) { int textureIndex = getTextureWhereDataResides (indices[${w}]); if(textureIndex != 0) { indices[${w}] = indices[${w}] - int(getSizeInConcatAxisValueFromIndex(textureIndex-int(1))); } return fetchDataFromCorrectTexture(textureIndex, indices); }`; return Object.assign(Object.assign({}, y), { output: { dims: O, type: T[0].type, textureType: c.TextureType.unpacked }, shaderSource: V }) })(0, d, r, i.axis) }) }, h = e => `int getTextureWhereDataResides(int index) { ${e.map((r,i)=>`if(index<${r}) {return ${i};} `).join("")} }`, p = e => h(e), l = (e, r) => { const i = [`float fetchDataFromCorrectTexture(int textureIndex, int indices[${r}]) {`]; for (let d = 0; d < e; ++d) d === 0 ? i.push(` if (textureIndex == ${d}) { return _X${d}(indices); }`) : d === e - 1 ? i.push(` else { return _X${d}(indices); }`) : i.push(` else if (textureIndex == ${d}) { return _X${d}(indices); }`); return i.push(" }"), i.join(` `) }, o = e => { const r = ["int getSizeInConcatAxisValueFromIndex(int index) {"]; for (let i = 0; i < e.length; ++i) i === 0 ? r.push(` if (index == ${i}) { return ${e[i]}; }`) : i === e.length - 1 ? r.push(` else { return ${e[i]}; }`) : r.push(` else if (index == ${i}) { return ${e[i]}; }`); return r.push(" }"), r.join(` `) }; n.parseConcatAttributes = e => (0, u.createAttributeWithCacheKey)({ axis: e.attributes.getInt("axis") }); const t = e => { if (!e || e.length < 1) throw new Error("too few inputs"); const r = e[0].type, i = e[0].dims.length; if (r === "string") throw new Error("string tensor is not supported yet"); for (const d of e) { if (d.type !== r) throw new Error("input tensors should be one type"); if (d.dims.length !== i) throw new Error("input tensors should have the same shape") } } }, 4770: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createUnpackedGroupedConvProgramInfoLoader = void 0; const u = a(6231), c = a(5060), f = a(2039), s = a(8138), h = a(2823); n.createUnpackedGroupedConvProgramInfoLoader = (p, l, o) => { const t = (e = l.length > 2, r = o.cacheKey, { name: "GroupedConv", inputNames: e ? ["X", "W", "Bias"] : ["X", "W"], inputTypes: e ? [f.TextureType.unpacked, f.TextureType.unpacked, f.TextureType.unpacked] : [f.TextureType.unpacked, f.TextureType.unpacked], cacheHint: r }); var e, r; return Object.assign(Object.assign({}, t), { get: () => ((i, d, g, m) => { const _ = d.length > 2 ? 
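// GroupedConv shader: each output channel belongs to group output_channel / (M / group) and only
// convolves over that group's slice of input channels; `_` below is the optional bias accumulation line.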
"value += getBias(output_channel);" : "", y = d[0].dims.slice(), T = d[1].dims.slice(), w = T[0] / m.group; u.Logger.verbose("GroupedConv", `autpPad:${m.autoPad}, dilations:${m.dilations}, group:${m.group}, kernelShape:${m.kernelShape}, pads:${m.pads}, strides:${m.strides}`); const S = (0, s.calculateOutputShape)(y, T, m.dilations, m.pads, m.strides), O = (0, c.getGlsl)(i.session.backend.glContext.version), { activationFunction: E, applyActivation: v } = (0, h.getActivationSnippet)(m), P = ` const ivec2 strides = ivec2(${m.strides[0]}, ${m.strides[1]}); const ivec2 pads = ivec2(${m.pads[0]}, ${m.pads[1]}); ${E} void main() { ivec4 coords = getOutputCoords(); int batch = coords.x; int output_channel = coords.y; ivec2 xRCCorner = coords.zw * strides - pads; int group_id = output_channel / ${w}; float value = 0.0; for (int wInChannel = 0; wInChannel < ${T[1]}; wInChannel++) { int input_channel = group_id * ${T[1]} + wInChannel; for (int wHeight = 0; wHeight < ${T[2]}; wHeight++) { int xHeight = xRCCorner.x + wHeight * ${m.dilations[0]}; if (xHeight < 0 || xHeight >= ${y[2]}) { continue; } for (int wWidth = 0; wWidth < ${T[3]}; wWidth++) { int xWidth = xRCCorner.y + wWidth * ${m.dilations[1]}; if (xWidth < 0 || xWidth >= ${y[3]}) { continue; } float xVal = getX(batch, input_channel, xWidth, xHeight); float wVal = getW(output_channel, wInChannel, wWidth, wHeight); value += xVal*wVal; } } } ${_} ${v} ${O.output} = vec4(value, .0, .0, .0); } `; return Object.assign(Object.assign({}, g), { output: { dims: S, type: d[0].type, textureType: f.TextureType.unpacked }, shaderSource: P, hasMain: !0 }) })(p, l, t, o) }) } }, 1386: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.conv2DPacked = n.conv2DPackedPointwise = void 0; const u = a(8138), c = a(8555), f = a(708); n.conv2DPackedPointwise = (s, h, p) => { const l = h[0].dims, o = h[1].dims, t = (0, u.calculateOutputShape)(l, o, p.dilations, p.pads, p.strides), e = s.reshapePacked(h[0], [l[1], l[2] * l[3]]), r = s.reshapePacked(h[1], [o[0], o[1]]), i = h.length > 2 ? [r, e, h[2]] : [r, e], d = s.run((0, f.createPackedMatmulProgramInfoLoader)(s, i, p), i); return s.reshapePacked(d, t) }, n.conv2DPacked = (s, h, p) => { const l = h[0].dims, o = h[1].dims, t = (0, u.calculateOutputShape)(l, o, p.dilations, p.pads, p.strides), e = s.run((0, c.createPackedIm2ColProgramInfoLoader)(s, h[0], h[1], t, p), [h[0]]), r = s.reshapePacked(h[1], [o[0], o[1] * o[2] * o[3]]), i = h.length === 3 ? [r, e, h[2]] : [r, e], d = s.run((0, f.createPackedMatmulProgramInfoLoader)(s, i, p), i); return s.reshapePacked(d, t) } }, 9663: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseConvTransposeAttributes = n.convTranspose = void 0; const u = a(246), c = a(5060), f = a(2039), s = a(2823), h = (r, i, d, g, m, _) => (r - 1) * i + d + (g - 1) * m + 1 - _, p = (r, i, d, g, m) => { const _ = Math.floor(r / 2); i === "SAME_UPPER" ? (d[g] = _, d[m] = r - _) : i === "SAME_LOWER" && (d[g] = r - _, d[m] = _) }; n.convTranspose = (r, i, d) => (e(i, d), l(r, i, d)); const l = (r, i, d) => { const g = t(d, i); return [o(r, i, g)] }, o = (r, i, d) => r.run(((g, m, _) => { const y = (T = m.length > 2, w = _.cacheKey, { name: "ConvTranspose", inputNames: T ? ["X", "W", "B"] : ["X", "W"], inputTypes: T ? 
[f.TextureType.unpacked, f.TextureType.unpacked, f.TextureType.unpacked] : [f.TextureType.unpacked, f.TextureType.unpacked], cacheHint: w }); var T, w; return Object.assign(Object.assign({}, y), { get: () => ((S, O, E, v) => { const P = O.length > 2 ? "getB(output_channel)" : "0.0", L = O[0].dims, V = O[1].dims, R = V[1], k = V[0] / v.group, Y = [O[0].dims[0], O[1].dims[1] * v.group, ...v.outputShape], C = (0, c.getGlsl)(S.session.backend.glContext.version), { activationFunction: $, applyActivation: X } = (0, s.getActivationSnippet)(v), z = ` const ivec2 strides = ivec2(${v.strides[0]}, ${v.strides[1]}); const ivec2 pads = ivec2(${v.pads[0]}, ${v.pads[1]}); ${$} void main() { ivec4 coords = getOutputCoords(); int batch = coords.x; int output_channel = coords.y; ivec2 loc = coords.zw + pads; int group_id = output_channel / ${R}; int wOutChannel = output_channel - group_id * ${R}; float value = ${P}; for (int inChannelOffset = 0; inChannelOffset < ${k}; inChannelOffset++) { int input_channel = group_id * ${k} + inChannelOffset; for (int wWOff = 0; wWOff < ${V[2]}; wWOff++) { for (int wHOff = 0; wHOff < ${V[3]}; wHOff++) { ivec2 wOff = ivec2(wWOff * ${v.dilations[0]}, wHOff * ${v.dilations[1]}); ivec2 wLoc = loc - wOff; ivec2 wLocIn = wLoc / strides; if ( wLocIn * strides == wLoc && wLocIn.x >= 0 && wLocIn.x < ${L[2]} && wLocIn.y >= 0 && wLocIn.y < ${L[3]} ) { float xVal = getX(batch, input_channel, wLocIn.y, wLocIn.x); float wVal = getW(input_channel, wOutChannel, wHOff, wWOff); value += xVal * wVal; } } } } ${X} ${C.output} = vec4(value, .0, .0, .0); } `; return Object.assign(Object.assign({}, E), { output: { dims: Y, type: O[0].type, textureType: f.TextureType.unpacked }, shaderSource: z, hasMain: !0 }) })(g, m, y, _) }) })(r, i, d), i), t = (r, i) => { const d = r.kernelShape.slice(); if (r.kernelShape.length === 0) for (let y = 2; y < i[1].dims.length; ++y) d.push(i[1].dims[y]); const g = r.pads.slice(), m = r.outputShape.slice(); ((y, T, w, S, O, E, v, P) => { const L = y.length - 2, V = P.length === 0; for (let R = 0; R < L; ++R) { const k = V ? 
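// ConvTranspose shape handling: when no output_shape attribute is given, each spatial dim becomes
// strides[i]*(in[i]-1) + outputPadding[i] + (kernelShape[i]-1)*dilations[i] + 1 - padBegin[i] - padEnd[i];
// otherwise the requested extent feeds a total-padding estimate that p() splits across begin/end per auto_pad.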
y[R + 2] * E[R] : P[R], Y = h(y[R + 2], E[R], O[R], T[R], w[R], k); p(Y, S, O, R, R + L), V && P.push(E[R] * (y[R + 2] - 1) + v[R] + (T[R] - 1) * w[R] + 1 - O[R] - O[R + L]) } })(i[0].dims, d, r.dilations, r.autoPad, g, r.strides, r.outputPadding, m); const _ = Object.assign({}, r); return Object.assign(_, { kernelShape: d, pads: g, outputShape: m, cacheKey: r.cacheKey }), _ }; n.parseConvTransposeAttributes = r => { const i = r.attributes, d = (0, s.parseInternalActivationAttributes)(i), g = i.getString("auto_pad", "NOTSET"), m = i.getInts("dilations", [1, 1]), _ = i.getInt("group", 1), y = i.getInts("kernel_shape", []), T = i.getInts("output_padding", [0, 0]), w = i.getInts("output_shape", []), S = i.getInts("pads", [0, 0, 0, 0]), O = i.getInts("strides", [1, 1]); return (0, u.createAttributeWithCacheKey)(Object.assign({ autoPad: g, dilations: m, group: _, kernelShape: y, outputPadding: T, outputShape: w, pads: S, strides: O }, d)) }; const e = (r, i) => { if (!r || r.length !== 2 && r.length !== 3) throw new Error("Conv requires 2 or 3 inputs"); if (r[0].dims.length !== 4 || r[1].dims.length !== 4) throw new Error("currently only support 2-dimensional conv"); if (r[0].dims[1] !== r[1].dims[0]) throw new Error("FILTER_IN_CHANNEL should be equal to DATA_CHANNEL"); const d = r[1].dims[1] * i.group; if (r.length === 3 && (r[2].dims.length !== 1 || r[2].dims[0] !== d)) throw new Error("invalid bias"); const g = r[0].dims.length - 2; if (i.dilations.length !== g) throw new Error(`dilations should be ${g}D`); if (i.strides.length !== g) throw new Error(`strides should be ${g}D`); if (i.pads.length !== 2 * g) throw new Error(`pads should be ${2*g}D`); if (i.outputPadding.length !== g) throw new Error(`output_padding should be ${g}D`); if (i.kernelShape.length !== 0 && i.kernelShape.length !== r[1].dims.length - 2) throw new Error("invalid kernel shape"); if (i.outputShape.length !== 0 && i.outputShape.length !== r[0].dims.length - 2) throw new Error("invalid output shape"); if (r[0].type !== "float32" || r[1].type !== "float32") throw new Error("ConvTranspose input(X,W) should be float tensor"); if (r.length === 3 && r[2].type !== "float32") throw new Error("ConvTranspose input(bias) should be float tensor") } }, 8138: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseConvAttributes = n.conv = n.calculateOutputShape = void 0; const u = a(246), c = a(2517), f = a(4770), s = a(1386), h = a(9828), p = a(2823), l = a(3248), o = a(5623); n.calculateOutputShape = (g, m, _, y, T) => { const w = g[0], S = g.slice(2), O = S.length, E = m[0], v = m.slice(2).map((L, V) => L + (L - 1) * (_[V] - 1)), P = S.map((L, V) => L + y[V] + y[V + O]).map((L, V) => Math.floor((L - v[V] + T[V]) / T[V])); return [w, E].concat(...P) }, n.conv = (g, m, _) => (d(m, _), t(g, m, _)); const t = (g, m, _) => { const y = i(_, m), T = g.session.pack, w = y.kernelShape[0] === 1 && y.kernelShape[1] === 1; return y.group > 1 ? [g.run((0, f.createUnpackedGroupedConvProgramInfoLoader)(g, m, y), m)] : w && T ? [e(g, m, y)] : T && m[0].dims.length === 4 && m[0].dims[0] === 1 && !w ? [(0, s.conv2DPacked)(g, m, y)] : [r(g, m, y)] }, e = (g, m, _) => { const y = m[0].dims, T = m[1].dims, w = (0, n.calculateOutputShape)(y, T, _.dilations, _.pads, _.strides), S = g.reshapeUnpacked(m[0], [y[1], y[2] * y[3]]), O = g.reshapeUnpacked(m[1], [T[0], T[1]]), E = m.length > 2 ? 
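// 1x1-kernel fast path: X is reshaped to [C, H*W] and W to [M, C] so the convolution becomes a
// plain matrix multiply (plus optional bias when a third input is present); the product is then
// reshaped back to the computed NCHW output shape.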
[O, S, m[2]] : [O, S], v = g.run((0, o.createMatmulProgramInfoLoader)(E, _), E); return g.reshapeUnpacked(v, w) }, r = (g, m, _) => { const y = m[0].dims, T = m[1].dims, w = (0, n.calculateOutputShape)(y, T, _.dilations, _.pads, _.strides), S = g.run((0, l.createIm2ColProgramInfoLoader)(g, m[0], m[1], w, _), [m[0]]), O = m.length === 3 ? [S, m[1], m[2]] : [S, m[1]]; return g.run((0, h.createDotProductProgramInfoLoader)(g, m, w, _), O) }, i = (g, m) => { const _ = g.kernelShape.slice(); if (g.kernelShape.length === 0) for (let w = 2; w < m[1].dims.length; ++w) _.push(m[1].dims[w]); const y = g.pads.slice(); c.PoolConvUtil.adjustPadsBasedOnAutoPad(m[0].dims, g.strides, g.dilations, _, y, g.autoPad); const T = Object.assign({}, g); return Object.assign(T, { kernelShape: _, pads: y, cacheKey: g.cacheKey }), T }; n.parseConvAttributes = g => { const m = g.attributes, _ = (0, p.parseInternalActivationAttributes)(m), y = m.getString("auto_pad", "NOTSET"), T = m.getInts("dilations", [1, 1]), w = m.getInt("group", 1), S = m.getInts("kernel_shape", []), O = m.getInts("pads", [0, 0, 0, 0]), E = m.getInts("strides", [1, 1]); return (0, u.createAttributeWithCacheKey)(Object.assign({ autoPad: y, dilations: T, group: w, kernelShape: S, pads: O, strides: E }, _)) }; const d = (g, m) => { if (!g || g.length !== 2 && g.length !== 3) throw new Error("Conv requires 2 or 3 inputs"); if (g[0].dims.length !== 4 || g[1].dims.length !== 4) throw new Error("currently only support 2-dimensional conv"); if (g[0].dims[1] !== g[1].dims[1] * m.group) throw new Error("FILTER_IN_CHANNEL should be equal to DATA_CHANNEL"); if (g.length === 3 && (g[2].dims.length !== 1 || g[1].dims[0] !== g[2].dims[0])) throw new Error("invalid bias"); const _ = g[0].dims.length - 2; if (m.dilations.length !== _) throw new Error(`dilations should be ${_}D`); if (m.strides.length !== _) throw new Error(`strides should be ${_}D`); if (m.pads.length !== 2 * _) throw new Error(`pads should be ${2*_}D`); if (m.kernelShape.length !== 0 && m.kernelShape.length !== g[1].dims.length - 2) throw new Error("invalid kernel shape"); if (g[0].type !== "float32" || g[1].type !== "float32") throw new Error("Conv input(X,W) should be float tensor"); if (g.length === 3 && g[2].type !== "float32") throw new Error("Conv input(bias) should be float tensor") } }, 5193: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseDepthToSpaceAttributes = n.depthToSpace = void 0; const u = a(3738); n.depthToSpace = (f, s, h) => { c(s); const p = h.blocksize, l = p * p, o = h.mode === "DCR" ? [0, 3, 4, 1, 5, 2] : [0, 1, 4, 2, 5, 3], t = h.mode === "DCR" ? 
[s[0].dims[0], p, p, s[0].dims[1] / l, s[0].dims[2], s[0].dims[3]] : [s[0].dims[0], s[0].dims[1] / l, p, p, s[0].dims[2], s[0].dims[3]], e = f.reshapeUnpacked(s[0], t), r = { perm: o, cacheKey: `${o}` }, [i] = (0, u.transpose)(f, [e], r), d = [s[0].dims[0], s[0].dims[1] / l, s[0].dims[2] * p, s[0].dims[3] * p]; return [f.reshapeUnpacked(i, d)] }, n.parseDepthToSpaceAttributes = f => { const s = f.attributes.getInt("blocksize"); if (s < 1) throw new Error(`blocksize must be >= 1, but got : ${s} for DepthToSpace`); const h = f.attributes.getString("mode", "DCR"); if (h !== "DCR" && h !== "CRD") throw new Error(`unrecognized mode: ${h} for DepthToSpace`); return { mode: h, blocksize: s } }; const c = f => { if (f.length !== 1) throw new Error(`DepthToSpace expect 1 inputs, but got ${f.length}`); if (f[0].type === "string" || f[0].dims.length !== 4) throw new TypeError("DepthToSpace input should be a 4-D numeric tensor") } }, 9828: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createDotProductProgramInfoLoader = void 0; const u = a(2517), c = a(5060), f = a(2039), s = a(2823), h = a(3248); n.createDotProductProgramInfoLoader = (p, l, o, t) => { const e = ((r, i) => ({ name: "ConvDotProduct", inputNames: r ? ["Im2Col", "K", "B"] : ["Im2Col", "K"], inputTypes: r ? [f.TextureType.unpacked, f.TextureType.packedLastDimension, f.TextureType.unpacked] : [f.TextureType.unpacked, f.TextureType.packedLastDimension], cacheKey: i.activationCacheKey }))(l.length > 2, t); return Object.assign(Object.assign({}, e), { get: () => ((r, i, d, g, m) => { const _ = d[0].dims, y = d[1].dims, T = [y[0], Math.ceil(_[1] * y[2] * y[3] / 4)], w = (0, h.calculateIm2ColDims)(_, y, g), [S, O] = r.calculateTextureWidthAndHeight(T, f.TextureType.packedLastDimension), E = u.ShapeUtil.computeStrides(w), [v, P] = r.calculateTextureWidthAndHeight(w, f.TextureType.packedLastDimension), L = g.length, V = d.length < 3 ? 
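// Im2Col + dot-product Conv: the kernel is stored packedLastDimension as [M, ceil(C*kH*kW/4)] and
// each output element accumulates dot() products four values at a time; `V` is the bias term
// ("0.0" when only X and W are supplied).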
"0.0" : "_B(b)", R = Math.ceil(_[1] * y[2] * y[3] / 4), { activationFunction: k, applyActivation: Y } = (0, s.getActivationSnippet)(m), C = (0, c.getGlsl)(r.session.backend.glContext.version), $ = ` ${k} float process(int indices[${L}]) { int b[1]; b[0] = indices[1]; int im2col[4]; im2col[0] = indices[0]; im2col[1] = indices[2]; im2col[2] = indices[3]; int im2colOffset = im2col[0] * ${E[0]} + im2col[1] * ${E[1]} + im2col[2] * ${E[2]}; int kernelOffset = indices[1] * ${T[1]}; float value = ${V}; for (int i = 0; i < ${R}; ++i) { vec2 im2colCoords = offsetToCoords(im2colOffset, ${v}, ${P}); vec2 kernelCoords = offsetToCoords(kernelOffset, ${S}, ${O}); value += dot(${C.texture2D}(Im2Col, im2colCoords), ${C.texture2D}(K, kernelCoords)); ++im2colOffset; ++kernelOffset; } ${Y} return value; }`; return Object.assign(Object.assign({}, i), { output: { dims: g, type: d[0].type, textureType: f.TextureType.unpacked }, shaderSource: $ }) })(p, e, l, o, t) }) } }, 7992: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseFlattenAttributes = n.flatten = void 0; const u = a(2517); n.flatten = (f, s, h) => { c(s, h); const p = u.ShapeUtil.flattenShape(s[0].dims, h); return [f.reshapeUnpacked(s[0], p)] }, n.parseFlattenAttributes = f => f.attributes.getInt("axis", 1); const c = (f, s) => { if (!f || f.length !== 1) throw new Error("Flatten requires 1 input."); const h = f[0].dims.length; if (h === 0) throw new Error("scalar tensor is not supported."); if (s < -h || s > h) throw new Error("Invalid axis"); if (f[0].type === "string") throw new Error("string tensor is not supported.") } }, 2823: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseInternalActivationAttributes = n.getActivationSnippet = void 0; const u = a(2517), c = a(4909); n.getActivationSnippet = function(f) { let s; switch (f.activation) { case "Relu": s = (0, c.glslRelu)(); break; case "Sigmoid": s = (0, c.glslSigmoid)(); break; case "Clip": s = (0, c.glslClip)(f.clipMin, f.clipMax); break; default: return { activationFunction: "", applyActivation: "" } } const h = s.name; return { activationFunction: s.body, applyActivation: `value = ${h}_(value);` } }, n.parseInternalActivationAttributes = f => { const s = f.getString("activation", ""); if (s === "Clip") { const [h, p] = f.getFloats("activation_params", [u.MIN_CLIP, u.MAX_CLIP]); return { activation: s, clipMax: p, clipMin: h, activationCacheKey: `${s}:${h},${p}` } } return { activation: s, activationCacheKey: s } } }, 1253: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseGatherAttributes = n.gather = void 0; const u = a(246), c = a(782), f = a(2517), s = a(2039); n.gather = (o, t, e) => (l(t, e.axis), [o.run(p(o, t, e), t)]), n.parseGatherAttributes = o => (0, u.createAttributeWithCacheKey)({ axis: o.attributes.getInt("axis", 0) }); const h = { name: "Gather", inputNames: ["A", "B"], inputTypes: [s.TextureType.unpacked, s.TextureType.unpacked] }, p = (o, t, e) => { const r = Object.assign(Object.assign({}, h), { cacheHint: e.cacheKey }); return Object.assign(Object.assign({}, r), { get: () => ((i, d, g, m) => { const _ = g[0].dims.slice(), y = g[1].dims.slice(), T = new Array(_.length + y.length - 1); m = f.ShapeUtil.normalizeAxis(m, _.length); const w = []; for (let O = 0; O < T.length; O++) O < m ? (T[O] = _[O], w.push(`inputIdx[${O}] = outputIdx[${O}];`)) : O < m + y.length ? 
(T[O] = y[O - m], w.push(`indexDataIdx[${O-m}] = outputIdx[${O}];`)) : (T[O] = _[O - y.length + 1], w.push(`inputIdx[${O-y.length+1}] = outputIdx[${O}];`)); const S = ` float process(int outputIdx[${T.length||1}]) { int inputIdx[${_.length}]; int indexDataIdx[${y.length||1}]; indexDataIdx[0] = 0; ${w.join(` `)} int idx = int(_B(indexDataIdx)); inputIdx[${m}] = idx < 0 ? idx + ${_[m]} : idx; return _A(inputIdx); }`; return Object.assign(Object.assign({}, d), { output: { dims: T, type: g[0].type, textureType: s.TextureType.unpacked }, shaderSource: S }) })(0, r, t, e.axis) }) }, l = (o, t) => { if (!o || o.length !== 2) throw new Error("Gather requires 2 inputs."); const e = o[0].dims.length; if (e < 1) throw new Error("Invalid input shape."); if (t < -e || t > e - 1) throw new Error("Invalid axis."); if (c.NUMBER_TYPES.indexOf(o[0].type) === -1) throw new Error("Invaid input type."); if (o[1].type !== "int32" && o[1].type !== "int16") throw new Error("Invaid input type.") } }, 4776: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseGemmAttributesV11 = n.parseGemmAttributesV7 = n.gemm = void 0; const u = a(246), c = a(2517), f = a(2039); n.gemm = (o, t, e) => (l(t, e), [o.run(h(t, e), t)]); const s = (o, t) => { const e = o.attributes.getInt("transA", 0) !== 0, r = o.attributes.getInt("transB", 0) !== 0, i = o.attributes.getFloat("alpha", 1), d = o.attributes.getFloat("beta", 1); return (0, u.createAttributeWithCacheKey)({ transA: e, transB: r, alpha: i, beta: d, isOptionalC: t }) }; n.parseGemmAttributesV7 = o => s(o, !1), n.parseGemmAttributesV11 = o => s(o, !0); const h = (o, t) => { const e = { name: "Gemm", inputNames: o.length === 3 ? ["A", "B", "C"] : ["A", "B"], inputTypes: o.length === 3 ? [f.TextureType.unpacked, f.TextureType.unpacked, f.TextureType.unpacked] : [f.TextureType.unpacked, f.TextureType.unpacked], key: t.cacheKey }; return Object.assign(Object.assign({}, e), { get: () => p(e, o, t) }) }, p = (o, t, e) => { const r = t[0].dims.slice(), i = t[1].dims.slice(), [d, g] = c.GemmUtil.getShapeOfGemmResult(r, e.transA, i, e.transB, t.length === 3 ? t[2].dims : void 0), m = [d, g]; if (!m) throw new Error("Can't use gemm on the given tensors"); let _ = r[r.length - 1], y = ""; e.transA && (_ = r[0]), e.transA && e.transB ? y = "value += _A_T(a) * _B_T(b);" : e.transA && !e.transB ? y = "value += _A_T(a) * _B(b);" : !e.transA && e.transB ? 
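// Gemm: Y = alpha * op(A) * op(B) + beta * C, with op() transposing when transA/transB are set.
// The branches here only select the matching _A/_A_T and _B/_B_T accessors used inside the k-loop.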
y = "value += _A(a) * _B_T(b);" : e.transA || e.transB || (y = "value += _A(a) * _B(b);"); const T = m.length, w = ` float process(int indices[${T}]) { int a[${T}]; int b[${T}]; ${t.length===3?`int c[${t[2].dims.length}];`:""} copyVec(indices, a); copyVec(indices, b); ${t.length===3?"bcastIndices_C(indices, c);":""} float value = 0.0; for (int k=0; k<${_}; ++k) { a[${T-1}] = k; b[${T-2}] = k; ${y} } value = value * alpha; ${t.length===3?"value += beta * _C(c);":""} return value; }`; return Object.assign(Object.assign({}, o), { output: { dims: m, type: t[0].type, textureType: f.TextureType.unpacked }, variables: [{ name: "alpha", type: "float", data: e.alpha }, { name: "beta", type: "float", data: e.beta }], shaderSource: w }) }, l = (o, t) => { if (!o) throw new Error("Input is missing"); if (t.isOptionalC && (o.length < 2 || o.length > 3)) throw new Error("Invaid input shape."); if (!t.isOptionalC && o.length !== 3) throw new Error("Gemm requires 3 inputs"); if (o.length === 3 && o[2].dims.length !== 1 && o[2].dims.length !== 2) throw new Error("Invalid input shape of C"); if (o[0].type !== "float32" && o[0].type !== "float64" || o[1].type !== "float32" && o[1].type !== "float64" || o.length === 3 && o[2].type !== "float32" && o[2].type !== "float64") throw new Error("Invalid input type."); if (o[0].type !== o[1].type || o.length === 3 && o[0].type !== o[2].type) throw new Error("Input types are mismatched") } }, 8555: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createPackedIm2ColProgramInfoLoader = void 0; const u = a(5060), c = a(2039), f = a(2827); n.createPackedIm2ColProgramInfoLoader = (s, h, p, l, o) => { const t = (e = o.cacheKey, { name: "Im2Col (packed)", inputNames: ["A"], inputTypes: [c.TextureType.packed], cacheHint: e }); var e; return Object.assign(Object.assign({}, t), { get: () => ((r, i, d, g, m, _) => { const y = d.dims, T = g.dims, w = m.length, S = [T[1] * T[2] * T[3], m[2] * m[3]], O = T[2] * T[3], E = (0, f.unpackFromChannel)(), v = (0, u.getGlsl)(r.session.backend.glContext.version); let P = ""; for (let V = 0; V <= 1; V++) for (let R = 0; R <= 1; R++) P += ` blockIndex = rc.x + ${R}; pos = rc.y + ${V}; if(blockIndex < ${S[1]} && pos < ${S[0]}) { offsetY = int(blockIndex / (${m[w-1]})) * ${_.strides[0]} - ${_.pads[0]}; d0 = offsetY + ${_.dilations[0]} * (imod(pos, ${O}) / ${T[2]}); if(d0 < ${y[2]} && d0 >= 0) { offsetX = imod(blockIndex, ${m[w-1]}) * ${_.strides[1]} - ${_.pads[1]}; d1 = offsetX + ${_.dilations[1]} * imod(imod(pos, ${O}), ${T[2]}); if(d1 < ${y[3]} && d1 >= 0) { ch = int(float(pos)/ ${O}.); innerDims = vec2(d0, d1); result[${2*V+R}] = getChannel( getA(0, ch, int(innerDims.x), int(innerDims.y)), innerDims); } } } `; const L = ` ${E} void main() { ivec2 rc = getOutputCoords(); vec4 result = vec4(0.0); int blockIndex, pos, offsetY, d0, offsetX, d1, ch; vec2 innerDims; ${P} ${v.output} = result; } `; return Object.assign(Object.assign({}, i), { output: { dims: S, type: d.type, textureType: c.TextureType.packed }, shaderSource: L, hasMain: !0 }) })(s, t, h, p, l, o) }) } }, 3248: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.calculateIm2ColDims = n.createIm2ColProgramInfoLoader = void 0; const u = a(2039); n.createIm2ColProgramInfoLoader = (c, f, s, h, p) => { const l = (o = p.cacheKey, { name: "Im2Col", inputNames: ["X"], inputTypes: [u.TextureType.unpacked], cacheHint: o }); var o; return Object.assign(Object.assign({}, l), { get: () => ((t, e, r, i, d, g) => { const m = r.dims, _ = i.dims, y = 
d.length, T = (0, n.calculateIm2ColDims)(m, _, d, 4), w = ` const int XC = ${m[1]}; const int XH = ${m[2]}; const int XW = ${m[3]}; const int KH = ${g.kernelShape[0]}; const int KW = ${g.kernelShape[1]}; const int dilationH = ${g.dilations[0]}; const int dilationW = ${g.dilations[1]}; const int strideH = ${g.strides[0]}; const int strideW = ${g.strides[1]}; const int padH = ${g.pads[0]}; const int padW = ${g.pads[1]}; const int KHKW = KH*KW; const int XCKHKW = XC * KHKW; const int outputChannels = 4; vec4 process(int indices[${y}]) { int b = indices[0]; // batch size int oh = indices[1] * strideH - padH; //output height int ow = indices[2] * strideW - padW; //output width int p = indices[3] * outputChannels; //patch vec4 value = vec4(0.0); for(int i=0; i < outputChannels; ++i) { if(p < XCKHKW) { int patchC = p / KHKW; int patchH = (p - patchC*KHKW) / KW; int patchW = (p - patchC*KHKW) - patchH * KW; int xh2 = oh + patchH * dilationH; int xw2 = ow + patchW * dilationW; int x[${m.length}]; x[0] = b; x[1] = patchC; x[2] = xh2; x[3] = xw2; if(xh2 >= 0 && xh2 < XH && xw2 >= 0 && xw2 < XW) { value[i] = _X(x); } } ++p; } return value; } `; return Object.assign(Object.assign({}, e), { output: { dims: T, type: r.type, textureType: u.TextureType.packedLastDimension }, shaderSource: w }) })(0, l, f, s, h, p) }) }, n.calculateIm2ColDims = (c, f, s, h = 4) => [s[0], s[2], s[3], Math.ceil(c[1] * f[2] * f[3] / h)] }, 6572: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseImageScalerAttributes = n.imageScaler = void 0; const u = a(246), c = a(2039); n.imageScaler = (l, o, t) => (p(o), [l.run(s(l, o, t), o)]), n.parseImageScalerAttributes = l => { const o = l.attributes.getFloat("scale"), t = l.attributes.getFloats("bias"); return (0, u.createAttributeWithCacheKey)({ scale: o, bias: t }) }; const f = { name: "ImageScaler", inputNames: ["X"], inputTypes: [c.TextureType.unpacked] }, s = (l, o, t) => { const e = Object.assign(Object.assign({}, f), { cacheHint: t.cacheKey }); return Object.assign(Object.assign({}, e), { get: () => ((r, i, d, g) => { const m = d[0].dims.slice(), _ = m.length, y = ` ${h(g.bias.length)} float process(int indices[${_}]) { return _X(indices) * scale + getBias(bias, indices[1]); }`; return Object.assign(Object.assign({}, i), { output: { dims: m, type: d[0].type, textureType: c.TextureType.unpacked }, variables: [{ name: "bias", type: "float", arrayLength: g.bias.length, data: g.bias }, { name: "scale", type: "float", data: g.scale }], shaderSource: y }) })(0, e, o, t) }) }, h = l => { const o = [`float getBias(float bias[${l}], int channel) {`]; for (let t = 0; t < l; ++t) t === 0 ? o.push(` if (channel == ${t}) { return bias[${t}]; }`) : t === l - 1 ? 
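/*
 * ImageScaler: getBias() is emitted as an explicit if / else-if / else chain
 * over the channel index (first channel -> "if", last -> "else"), presumably
 * to avoid dynamically indexing the bias array inside the shader.
 */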
o.push(` else { return bias[${t}]; }`) : o.push(` else if (channel == ${t}) { return bias[${t}]; }`); return o.push(" }"), o.join(` `) }, p = l => { if (!l || l.length !== 1) throw new Error("ImageScaler requires 1 input."); if (l[0].dims.length !== 4) throw new Error("Invalid input shape."); if (l[0].type !== "float32" && l[0].type !== "float64") throw new Error("Invalid input type.") } }, 3346: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseInstanceNormalizationAttributes = n.instanceNormalization = void 0; const u = a(5060), c = a(2039); n.instanceNormalization = (o, t, e) => { l(t); const r = o.run(s(t[0]), t); return [o.run(p(o, t[0], e, r.dims), [t[0], r, t[1], t[2]])] }, n.parseInstanceNormalizationAttributes = o => o.attributes.getFloat("epsilon", 1e-5); const f = { name: "InstanceNormalization_MeanAndVariance", inputNames: ["X"], inputTypes: [c.TextureType.unpacked] }, s = o => Object.assign(Object.assign({}, f), { get: () => ((t, e) => { const r = e.dims.slice(), i = r[1], d = r[2] * r[3], g = [r[0], i], m = ` vec4 process(int[2] indices) { vec4 v = vec4(0.0); int a[4]; a[0] = indices[0]; a[1] = indices[1]; float temp = 0.0; for(int a2=0; a2<${r[2]}; a2++) { a[2] = a2; for(int a3=0; a3<${r[3]}; a3++) { a[3] = a3; float x = _X(a); temp += x; } } float mean = temp / float(${d}); temp = 0.0; for(int a2=0; a2<${r[2]}; a2++) { a[2] = a2; for(int a3=0; a3<${r[3]}; a3++) { a[3] = a3; float x = _X(a); temp += (x - mean) * (x - mean); } } v.r = mean; v.g = temp / float(${d}); return v; }`; return Object.assign(Object.assign({}, t), { output: { dims: g, type: e.type, textureType: c.TextureType.packedLastDimension }, shaderSource: m }) })(f, o) }), h = { name: "InstanceNormalization_ComputeOutput", inputNames: ["X", "MeanAndVariance", "Scale", "B"], inputTypes: [c.TextureType.unpacked, c.TextureType.packedLastDimension, c.TextureType.unpacked, c.TextureType.unpacked] }, p = (o, t, e, r) => { const i = Object.assign(Object.assign({}, h), { cacheHint: `${e}` }); return Object.assign(Object.assign({}, i), { get: () => ((d, g, m, _, y) => { const T = (0, u.getGlsl)(d.session.backend.glContext.version), [w, S] = d.calculateTextureWidthAndHeight(y, c.TextureType.packedLastDimension), [O, E] = [w / 4, S], v = ` vec4 get_MeanAndVariance(int[2] mv) { int offset = indicesToOffset_MeanAndVariance(mv); vec2 coords = offsetToCoords(offset, ${O}, ${E}); return ${T.texture2D}(MeanAndVariance, coords); } float process(int[4] indices) { int mv[2]; mv[0] = indices[0]; mv[1] = indices[1]; vec4 mean_and_variance = get_MeanAndVariance(mv); float mean = mean_and_variance.r; float variance = mean_and_variance.g; int sb[1]; sb[0] = indices[1]; float scale = _Scale(sb); float b = _B(sb); return scale * (_X(indices) - mean) / sqrt(variance + epsilon) + b; }`; return Object.assign(Object.assign({}, g), { output: { dims: m.dims, type: m.type, textureType: c.TextureType.unpacked }, variables: [{ name: "epsilon", type: "float", data: _ }], shaderSource: v }) })(o, i, t, e, r) }) }, l = o => { if (!o || o.length !== 3) throw new Error("InstanceNormalization requires 3 inputs."); const t = o[0], e = o[1], r = o[2]; if (t.dims.length < 3 || e.dims.length !== 1 || r.dims.length !== 1) throw new Error("Invalid input shape."); if (e.dims[0] !== t.dims[1] || r.dims[0] !== t.dims[1]) throw new Error("Input shapes are mismatched."); if (t.type !== "float32" && t.type !== "float64" || e.type !== "float32" && e.type !== "float64" || r.type !== "float32" && r.type !== "float64") throw new 
Error("Invalid input type."); if (o[0].dims.length !== 4) throw new Error("Only support 4-D input shape.") } }, 708: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createPackedMatmulProgramInfoLoader = void 0; const u = a(2517), c = a(5060), f = a(2039), s = a(9390), h = a(2823), p = a(5623); n.createPackedMatmulProgramInfoLoader = (l, o, t) => { const e = (r = o.length > 2, i = t.activationCacheKey, { name: "MatMul (packed)", inputNames: r ? ["A", "B", "Bias"] : ["A", "B"], inputTypes: r ? [f.TextureType.packed, f.TextureType.packed, f.TextureType.packed] : [f.TextureType.packed, f.TextureType.packed], cacheHint: i }); var r, i; return Object.assign(Object.assign({}, e), { get: () => ((d, g, m, _) => { const y = m.length > 2, T = y ? "value += getBiasForMatmul();" : "", w = m[0].dims, S = m[1].dims, O = u.BroadcastUtil.calcShape(w, S, !0), E = !u.ShapeUtil.areEqual(m[0].dims, m[1].dims); if (!O) throw new Error("Can't use matmul on the given tensors"); const v = w[w.length - 1], P = Math.ceil(v / 2), L = w.length, V = S.length, R = (0, c.getGlsl)(d.session.backend.glContext.version), k = (0, s.getCoordsDataType)(O.length), Y = O.length, C = (0, s.getGlChannels)(), { activationFunction: $, applyActivation: X } = (0, h.getActivationSnippet)(_), z = y ? `${(0,p.getBiasForMatmul)(k,C,m[2].dims,O,!0)}` : "", Z = E ? `${function(Te,se,ye,be){let Ie=[],Le=[];const ve=ye[0].dims,Ne=ye[1].dims,Fe=ve.length,Me=Ne.length,Oe=be.length,Be=Oe-Fe,Ue=Oe-Me;Ie=ve.map((Ae,Re)=>`coords.${se[Re+Be]}`),Ie[Fe-1]="i*2",Ie.join(", "),Le=Ne.map((Ae,Re)=>`coords.${se[Re+Ue]}`),Le[Me-2]="i*2",Le.join(", ");const ze=u.BroadcastUtil.getBroadcastDims(ve,be),He=u.BroadcastUtil.getBroadcastDims(Ne,be),Ke=ze.map(Ae=>`coords.${se[Ae+Be]} = 0;`).join(` `),Ge=He.map(Ae=>`coords.${se[Ae+Ue]} = 0;`).join(` `),Ve=`int lastDim = coords.${se[Oe-1]}; coords.${se[Oe-1]} = coords.${se[Oe-2]}; coords.${se[Oe-2]} = lastDim;`;return` vec4 getAAtOutCoordsMatmul(int i) { ${Te} coords = getOutputCoords(); ${Ve} ${Ke} vec4 outputValue = getA(${Ie}); return outputValue; } vec4 getBAtOutCoordsMatmul(int i) { ${Te} coords = getOutputCoords(); ${Ve} ${Ge} vec4 outputValue = getB(${Le}); return outputValue; }`}(k,C,m,O)}` : "", J = E ? "getAAtOutCoordsMatmul(i)" : `getA(${function(Te,se){let ye="";for(let be=0;be { Object.defineProperty(n, "__esModule", { value: !0 }), n.getBiasForMatmul = n.createMatmulProgramInfoLoader = n.parseMatMulAttributes = n.matMul = void 0; const u = a(2517), c = a(2039), f = a(9390), s = a(2823), h = a(708); function p(t, e) { const r = (i = t.length > 2, d = e.activationCacheKey, { name: "MatMul", inputNames: i ? ["A", "B", "Bias"] : ["A", "B"], inputTypes: i ? [c.TextureType.unpacked, c.TextureType.unpacked, c.TextureType.unpacked] : [c.TextureType.unpacked, c.TextureType.unpacked], cacheHint: d }); var i, d; return Object.assign(Object.assign({}, r), { get: () => function(g, m, _) { const y = m[0].dims, T = m[1].dims, w = u.BroadcastUtil.calcShape(y, T, !0); if (!w) throw new Error("Can't use matmul on the given tensors"); const S = (0, f.getCoordsDataType)(w.length), O = (0, f.getGlChannels)(), { activationFunction: E, applyActivation: v } = (0, s.getActivationSnippet)(_), P = m.length > 2, L = P ? "value += getBiasForMatmul();" : "", V = P ? 
`${o(S,O,m[2].dims,w,!1)}` : "", R = w.length, k = y.length, Y = T.length, C = ` ${E} ${V} float process(int indices[${R}]) { int a[${k}]; int b[${Y}]; bcastMatmulIndices_A(indices, a); bcastMatmulIndices_B(indices, b); float value; for (int k=0; k<${y[y.length-1]}; ++k) { a[${k-1}] = k; b[${Y-2}] = k; value += _A(a) * _B(b); } ${L} ${v} return value; }`; return Object.assign(Object.assign({}, g), { output: { dims: w, type: m[0].type, textureType: c.TextureType.unpacked }, shaderSource: C }) }(r, t, e) }) } n.matMul = (t, e, r) => (l(e), t.session.pack ? [t.run((0, h.createPackedMatmulProgramInfoLoader)(t, e, r), e)] : [t.run(p(e, r), e)]), n.parseMatMulAttributes = t => (0, s.parseInternalActivationAttributes)(t.attributes), n.createMatmulProgramInfoLoader = p; const l = t => { if (!t || t.length !== 2) throw new Error("MatMul requires 2 inputs."); if (t[0].dims[t[0].dims.length - 1] !== t[1].dims[t[1].dims.length - 2]) throw new Error("shared dimension does not match."); if (t[0].type !== "float32" && t[0].type !== "float64" || t[1].type !== "float32" && t[1].type !== "float64") throw new Error("inputs should be float type"); if (t[0].type !== t[1].type) throw new Error("inputs types should match") }; function o(t, e, r, i, d) { let g = ""; const m = r.length, _ = i.length, y = _ - m; g = _ < 2 && m > 0 ? "coords" : r.map((S, O) => `coords.${e[O+y]}`).join(", "); const T = u.BroadcastUtil.getBroadcastDims(r, i).map(S => `coords.${e[S+y]} = 0;`).join(` `); let w = "vec4(outputValue.xx, outputValue.yy)"; return u.ShapeUtil.size(r) === 1 && (w = "vec4(outputValue.x)"), d ? ` vec4 getBiasForMatmul() { ${t} coords = getOutputCoords(); ${T} vec4 outputValue = getBias(${g}); return ${w}; }` : ` float getBiasForMatmul() { ${t} coords = getOutputCoords(); ${T} return getBias(coords.x); }` } n.getBiasForMatmul = o }, 2403: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createPackProgramInfoLoader = void 0; const u = a(5060), c = a(2039), f = a(9390), s = a(2827), h = { name: "pack", inputNames: ["A"], inputTypes: [c.TextureType.unpackedReversed] }; n.createPackProgramInfoLoader = (p, l) => Object.assign(Object.assign({}, h), { get: () => ((o, t) => { const e = (0, u.getGlsl)(o.session.backend.glContext.version), r = t.dims, i = r.length, d = t.dims.length, g = (0, f.getCoordsDataType)(d), m = (0, s.getChannels)("rc", d), _ = (y = d, T = m, w = r[r.length - 2], S = r[r.length - 1], y === 0 || y === 1 ? "" : ` int r = ${T[y-2]}; int c = ${T[y-1]}; int rp1 = ${T[y-2]} + 1; int cp1 = ${T[y-1]} + 1; bool rEdge = rp1 >= ${S}; bool cEdge = cp1 >= ${w}; `); var y, T, w, S; let O; O = i === 0 ? [1, 1] : i === 1 ? [r[0], 1] : [r[d - 1], r[d - 2]]; const E = function(L, V, R) { if (L === 0) return "false"; if (L === 1) return `rc > ${V[0]}`; let k = ""; for (let Y = L - 2; Y < L; Y++) k += `${R[Y]} >= ${V[Y-L+2]}`, Y < L - 1 && (k += "||"); return k }(d, O, m), v = function(L, V) { const R = L.length; if (R === 0) return "getA(), 0, 0, 0"; if (R === 1) return `getA(rc), rc + 1 >= ${L[0]} ? 0. : getA(rc + 1), 0, 0`; let k = ""; if (R > 2) for (let Y = 0; Y < R - 2; ++Y) k += `${V[Y]},`; return `getA(${k}r, c), rEdge ? 0. : getA(${k}rp1, c), cEdge ? 0. : getA(${k}r, cp1), rEdge || cEdge ? 0. 
: getA(${k}rp1, cp1)` }(r, m), P = ` void main() { ${g} rc = getOutputCoords(); if(${E}) { ${e.output} = vec4(0); } else { ${_} ${e.output} = vec4(${v}); } } `; return Object.assign(Object.assign({}, h), { hasMain: !0, output: { dims: t.dims, type: t.type, textureType: c.TextureType.packed }, shaderSource: P }) })(p, l) }) }, 2827: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.unpackFromChannel = n.getChannels = n.getVecChannels = void 0; const u = a(9390); function c(f, s) { return (0, u.getGlChannels)(s).map(h => `${f}.${h}`) } n.getVecChannels = c, n.getChannels = function(f, s) { return s === 1 ? [f] : c(f, s) }, n.unpackFromChannel = function() { return ` float getChannel(vec4 frag, int dim) { int modCoord = imod(dim, 2); return modCoord == 0 ? frag.r : frag.g; } float getChannel(vec4 frag, vec2 innerDims) { vec2 modCoord = mod(innerDims, 2.); return modCoord.x == 0. ? (modCoord.y == 0. ? frag.r : frag.g) : (modCoord.y == 0. ? frag.b : frag.a); } ` } }, 2870: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parsePadAttributesV11 = n.padV11 = n.parsePadAttributesV2 = n.padV2 = void 0; const u = a(246), c = a(2517), f = a(5060), s = a(2039), h = { name: "Pad", inputNames: ["A"], inputTypes: [s.TextureType.unpacked] }; n.padV2 = (g, m, _) => (o(m), [g.run(Object.assign(Object.assign({}, h), { cacheHint: _.cacheKey, get: () => l(g, m[0], _) }), m)]), n.parsePadAttributesV2 = g => { const m = g.attributes.getString("mode", "constant"), _ = g.attributes.getFloat("value", 0), y = g.attributes.getInts("pads"); return (0, u.createAttributeWithCacheKey)({ mode: m, value: _, pads: y }) }, n.padV11 = (g, m, _) => { t(m); const y = p(g, m, _); return (0, n.padV2)(g, [m[0]], y) }, n.parsePadAttributesV11 = g => g.attributes.getString("mode", "constant"); const p = (g, m, _) => { if (!g.session.isInitializer(m[1].dataId) || m.length >= 3 && !g.session.isInitializer(m[2].dataId)) throw new Error("dynamic pad attributes are not allowed"); const y = Array.from(m[1].integerData), T = m.length >= 3 ? 
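/*
 * Pad-11: pads come from input[1] (an int32 tensor that must be a graph
 * initializer) and the optional constant value from input[2]; both are folded
 * into a static attribute object so the padV2 code path can be reused.
 */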
m[2].floatData[0] : 0; return (0, u.createAttributeWithCacheKey)({ mode: _, pads: y, value: T }) }, l = (g, m, _) => { const y = c.ShapeUtil.padShape(m.dims.slice(), _.pads), T = y.length, w = ` ${e(g,m,_)} float process(int[${T}] indices) { return padA(indices); }`; return { name: "Pad", inputNames: ["A"], inputTypes: [s.TextureType.unpacked], output: { dims: y, type: m.type, textureType: s.TextureType.unpacked }, shaderSource: w } }, o = g => { if (!g || g.length !== 1) throw new Error("Pad requires 1 input"); if (g[0].type !== "float32" && g[0].type !== "float64") throw new Error("Invalid input type.") }, t = g => { if (!g || g.length !== 2 && g.length !== 3) throw new Error("Pad requires 2 or 3 inputs"); if (g[1].type !== "int32") throw new Error("Invalid input type."); if (g.length >= 3 && g[2].type === "string") throw new Error("Invalid input type.") }, e = (g, m, _) => { const y = (0, f.getGlsl)(g.session.backend.glContext.version), [T, w] = g.calculateTextureWidthAndHeight(m.dims, s.TextureType.unpacked), S = c.ShapeUtil.computeStrides(m.dims); switch (_.mode) { case "constant": return r(y, m.dims, S, T, w, _.pads, _.value); case "reflect": return i(y, m.dims, S, T, w, _.pads); case "edge": return d(y, m.dims, S, T, w, _.pads); default: throw new Error("Invalid mode") } }, r = (g, m, _, y, T, w, S) => { const O = m.length; let E = ""; for (let v = O - 1; v >= 0; --v) E += ` k = m[${v}] - ${w[v]}; if (k < 0) return constant; if (k >= ${m[v]}) return constant; offset += k * ${_[v]}; `; return ` float padA(int m[${O}]) { const float constant = float(${S}); int offset = 0; int k = 0; ${E} vec2 coords = offsetToCoords(offset, ${y}, ${T}); float value = getColorAsFloat(${g.texture2D}(A, coords)); return value; } ` }, i = (g, m, _, y, T, w) => { const S = m.length; let O = ""; for (let E = S - 1; E >= 0; --E) O += ` k = m[${E}] - ${w[E]}; if (k < 0) { k = -k; } { const int _2n_1 = ${2*(m[E]-1)}; k = int( mod( float(k), float(_2n_1) ) ) ; if(k >= ${m[E]}) { k = _2n_1 - k; } } offset += k * ${_[E]}; `; return ` float padA(int m[${S}]) { int offset = 0; int k = 0; ${O} vec2 coords = offsetToCoords(offset, ${y}, ${T}); float value = getColorAsFloat(${g.texture2D}(A, coords)); return value; } ` }, d = (g, m, _, y, T, w) => { const S = m.length; let O = ""; for (let E = S - 1; E >= 0; --E) O += ` k = m[${E}] - ${w[E]}; if (k < 0) k = 0; if (k >= ${m[E]}) k = ${m[E]-1}; offset += k * ${_[E]}; `; return ` float padA(int m[${S}]) { int offset = 0; int k = 0; ${O} vec2 coords = offsetToCoords(offset, ${y}, ${T}); float value = getColorAsFloat(${g.texture2D}(A, coords)); return value; } ` } }, 2143: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.globalMaxPool = n.parseMaxPoolAttributes = n.maxPool = n.parseGlobalAveragePoolAttributes = n.globalAveragePool = n.parseAveragePoolAttributes = n.averagePool = void 0; const u = a(246), c = a(2517), f = a(2039); n.averagePool = (d, g, m) => { t(g); const _ = { name: "AveragePool", inputNames: ["X"], inputTypes: [f.TextureType.unpacked], cacheHint: m.cacheKey }; return [d.run(Object.assign(Object.assign({}, _), { get: () => s(g, _, !1, m) }), g)] }, n.parseAveragePoolAttributes = d => { const g = d.attributes.getString("auto_pad", "NOTSET"), m = d.attributes.getInt("ceil_mode", 0), _ = d.attributes.getInt("count_include_pad", 0) !== 0, y = d.attributes.getInts("kernel_shape"), T = d.attributes.getInts("strides", []), w = d.attributes.getInts("pads", []); if (m !== 0) throw new Error("using ceil() in shape computation is not yet 
supported for AveragePool"); return (0, u.createAttributeWithCacheKey)({ autoPad: g, ceilMode: m, countIncludePad: _, kernelShape: y, strides: T, pads: w }) }; const s = (d, g, m, _) => { const [y, T] = p(d, _, m), w = c.ShapeUtil.size(y.kernelShape); let S = ""; y.countIncludePad ? S += `value /= float(${w});` : S += `value /= float(${w} - pad);`; const O = ` ${e(d[0].dims,y,"value += _X(x);",S,"0.0")} `; return Object.assign(Object.assign({}, g), { output: { dims: T, type: d[0].type, textureType: f.TextureType.unpacked }, shaderSource: O }) }; n.globalAveragePool = (d, g, m) => { t(g); const _ = { name: "GlobalAveragePool", inputNames: ["X"], inputTypes: [f.TextureType.unpacked], cacheHint: `${m.countIncludePad}` }; return [d.run(Object.assign(Object.assign({}, _), { get: () => s(g, _, !0, m) }), g)] }, n.parseGlobalAveragePoolAttributes = d => { const g = d.attributes.getInt("count_include_pad", 0) !== 0; return (0, u.createAttributeWithCacheKey)({ autoPad: "", ceilMode: 0, countIncludePad: g, kernelShape: [], strides: [], pads: [] }) }, n.maxPool = (d, g, m) => { t(g); const _ = { name: "MaxPool", inputNames: ["X"], inputTypes: [f.TextureType.unpacked], cacheHint: m.cacheKey }; return [d.run(Object.assign(Object.assign({}, _), { get: () => h(g, _, !1, m) }), g)] }, n.parseMaxPoolAttributes = d => { const g = d.attributes.getString("auto_pad", "NOTSET"), m = d.attributes.getInt("ceil_mode", 0), _ = d.attributes.getInts("kernel_shape"), y = d.attributes.getInts("strides", []), T = d.attributes.getInts("pads", []), w = d.attributes.getInt("storage_order", 0), S = d.attributes.getInts("dilations", []); if (w !== 0) throw new Error("column major storage order is not yet supported for MaxPool"); if (m !== 0) throw new Error("using ceil() in shape computation is not yet supported for MaxPool"); return (0, u.createAttributeWithCacheKey)({ autoPad: g, ceilMode: m, countIncludePad: !1, kernelShape: _, strides: y, pads: T, storageOrder: w, dilations: S }) }; const h = (d, g, m, _) => { const [y, T] = p(d, _, m), w = ` ${e(d[0].dims,y,` value = max(_X(x), value); `,"","-1e5")} `; return Object.assign(Object.assign({}, g), { output: { dims: T, type: d[0].type, textureType: f.TextureType.unpacked }, shaderSource: w }) }, p = (d, g, m) => { const _ = d[0].dims.slice(), y = Object.hasOwnProperty.call(g, "dilations"), T = g.kernelShape.slice(), w = g.strides.slice(), S = y ? g.dilations.slice() : [], O = g.pads.slice(); c.PoolConvUtil.adjustPoolAttributes(m, _, T, w, S, O); const E = c.PoolConvUtil.computePoolOutputShape(m, _, w, S, T, O, g.autoPad), v = Object.assign({}, g); return y ? 
Object.assign(v, { kernelShape: T, strides: w, pads: O, dilations: S, cacheKey: g.cacheKey }) : Object.assign(v, { kernelShape: T, strides: w, pads: O, cacheKey: g.cacheKey }), [v, E] }, l = { autoPad: "", ceilMode: 0, countIncludePad: !1, kernelShape: [], strides: [], pads: [], storageOrder: 0, dilations: [], cacheKey: "" }, o = { name: "GlobalMaxPool", inputNames: ["X"], inputTypes: [f.TextureType.unpacked] }; n.globalMaxPool = (d, g) => (t(g), [d.run(Object.assign(Object.assign({}, o), { get: () => h(g, o, !0, l) }), g)]); const t = d => { if (!d || d.length !== 1) throw new Error("Pool ops requires 1 input."); if (d[0].type !== "float32" && d[0].type !== "float64") throw new Error("Invalid input type.") }, e = (d, g, m, _, y) => { const T = d.length; if (g.kernelShape.length <= 2) { const w = g.kernelShape[g.kernelShape.length - 1], S = g.strides[g.strides.length - 1], O = g.pads[g.pads.length / 2 - 1], E = g.pads[g.pads.length - 1], v = d[T - 1]; let P = "", L = "", V = ""; if (P = O + E !== 0 ? ` for (int i = 0; i < ${w}; i++) { x[${T} - 1] = indices[${T} - 1] * ${S} - ${O} + i; if (x[${T} - 1] < 0 || x[${T} - 1] >= ${v}) { pad++; continue; } ${m} }` : ` for (int i = 0; i < ${w}; i++) { x[${T} - 1] = indices[${T} - 1] * ${S} - ${O} + i; ${m} }`, g.kernelShape.length === 2) { const R = g.kernelShape[g.kernelShape.length - 2], k = g.strides[g.strides.length - 2], Y = g.pads[g.pads.length / 2 - 2], C = g.pads[g.pads.length - 2], $ = d[T - 2]; L = Y + C !== 0 ? ` for (int j = 0; j < ${R}; j++) { x[${T} - 2] = indices[${T} - 2] * ${k} - ${Y} + j; if (x[${T} - 2] < 0 || x[${T} - 2] >= ${$}) { pad+= ${w}; continue; } ` : ` for (int j = 0; j < ${R}; j++) { x[${T} - 2] = indices[${T} - 2] * ${k} - ${Y} + j; `, V = ` } ` } return ` float process(int indices[${T}]) { int x[${T}]; copyVec(indices, x); float value = ${y}; int pad = 0; ${L} ${P} ${V} ${_} return value; } ` } { const w = c.ShapeUtil.size(g.kernelShape), S = c.ShapeUtil.computeStrides(g.kernelShape), O = S.length, E = g.pads.length, v = i(O), P = r(d, "inputDims"), L = r(g.pads, "pads"), V = r(S, "kernelStrides"), R = r(g.strides, "strides"); let k = ""; return k = g.pads.reduce((Y, C) => Y + C) ? 
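/*
 * Generic (rank > 2) pooling: kernel positions are enumerated as flattened
 * offsets and mapped back with offsetToIndices(); when any pad is non-zero,
 * out-of-bounds positions are skipped and counted in "pad", which AveragePool
 * uses to correct the divisor when count_include_pad is off.
 */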
` if (x[j] >= inputDims[j] || x[j] < 0) { pad++; isPad = true; break; } } if (!isPad) { ${m} }` : ` } ${m} `, ` ${v} float process(int indices[${T}]) { int x[${T}]; copyVec(indices, x); int offset[${O}]; int pads[${E}]; int inputDims[${T}]; int kernelStrides[${O}]; int strides[${O}]; ${L} ${P} ${R} ${V} float value = ${y}; int pad = 0; bool isPad = false; for (int i = 0; i < ${w}; i++) { offsetToIndices(i, kernelStrides, offset); isPad = false; for (int j = ${T} - ${O}; j < ${T}; j++) { x[j] = indices[j] * strides[j - ${T} + ${O}] + offset[j - ${T} + ${O}] - pads[j - 2]; ${k} } ${_} return value; } ` } }, r = (d, g) => { let m = ""; for (let _ = 0; _ < d.length; _++) m += ` ${g}[${_}] = ${d[_]}; `; return m }, i = d => ` void offsetToIndices(int offset, int[${d}] strides, out int[${d}] indices) { if (${d} == 0) { return; } for (int i = 0; i < ${d} - 1; ++i) { indices[i] = offset / strides[i]; offset -= indices[i] * strides[i]; } indices[${d} - 1] = offset; }` }, 4939: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.reduceLogSumSquare = n.reduceLogSum = n.reduceProd = n.reduceMin = n.reduceMax = n.reduceMean = n.reduceSum = n.parseReduceAttributes = void 0; const u = a(246), c = a(782), f = a(2517), s = a(2039), h = (o, t, e, r, i) => { l(t); const d = { name: r, inputNames: ["A"], inputTypes: [s.TextureType.unpacked] }; return [o.run(Object.assign(Object.assign({}, d), { cacheHint: e.cacheKey, get: () => p(o, t, e, r, i, d) }), t)] }; n.parseReduceAttributes = o => { const t = o.attributes.getInts("axes", []), e = o.attributes.getInt("keepdims", 1) === 1; return (0, u.createAttributeWithCacheKey)({ axes: t, keepDims: e }) }; const p = (o, t, e, r, i, d) => { const g = [], m = t[0].dims.length || 1, _ = [], y = f.ShapeUtil.normalizeAxes(e.axes, t[0].dims.length), T = i(t, y); let w = T[1]; for (let O = 0; O < t[0].dims.length; O++) y.indexOf(O) >= 0 || y.length === 0 ? 
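/*
 * Reduce ops: axes named in "axes" (or every axis when the list is empty)
 * become nested GLSL loops wrapped around the op-specific accumulation
 * snippet; the remaining axes are copied straight from the output index, and
 * keepDims inserts size-1 dims for the reduced axes.
 */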
(e.keepDims && g.push(1), w = ` for(int j${O} = 0; j${O} < ${t[0].dims[O]}; j${O}++) { inputIdx[${O}] = j${O}; ${w} }`) : (_.push(`inputIdx[${O}] = outputIdx[${g.length}];`), g.push(t[0].dims[O])); const S = ` float process(int outputIdx[${g.length||1}]) { float value; // final result int inputIdx[${m}]; // addressing input data ${_.join(` `)} ${T[0]} // init ops for reduce max/min ${w} ${T[2]} // final computation for reduce mean return value; }`; return Object.assign(Object.assign({}, d), { output: { dims: g, type: t[0].type, textureType: s.TextureType.unpacked }, shaderSource: S }) }, l = o => { if (!o || o.length !== 1) throw new Error("Reduce op requires 1 input."); if (c.NUMBER_TYPES.indexOf(o[0].type) === -1) throw new Error("Invalid input type.") }; n.reduceSum = (o, t, e) => h(o, t, e, "ReduceSum", () => ["value = 0.0;", "value += _A(inputIdx);", ""]), n.reduceMean = (o, t, e) => h(o, t, e, "ReduceMean", (r, i) => { let d = 1; for (let g = 0; g < r[0].dims.length; g++)(i.indexOf(g) >= 0 || i.length === 0) && (d *= r[0].dims[g]); return ["value = 0.0;", "value += _A(inputIdx);", `value /= ${d}.;`] }), n.reduceMax = (o, t, e) => h(o, t, e, "ReduceMax", (r, i) => { const d = []; for (let g = 0; g < r[0].dims.length; g++)(i.indexOf(g) >= 0 || i.length === 0) && d.push(`inputIdx[${g}] = 0;`); return [`${d.join(` `)} value = _A(inputIdx);`, "value = max(value, _A(inputIdx));", ""] }), n.reduceMin = (o, t, e) => h(o, t, e, "ReduceMin", (r, i) => { const d = []; for (let g = 0; g < r[0].dims.length; g++)(i.indexOf(g) >= 0 || i.length === 0) && d.push(`inputIdx[${g}] = 0;`); return [`${d.join(` `)} value = _A(inputIdx);`, "value = min(value, _A(inputIdx));", ""] }), n.reduceProd = (o, t, e) => h(o, t, e, "ReduceProd", () => ["value = 1.0;", "value *= _A(inputIdx);", ""]), n.reduceLogSum = (o, t, e) => h(o, t, e, "ReduceLogSum", () => ["value = 0.0;", "value += _A(inputIdx);", "value = log(value);"]), n.reduceLogSumSquare = (o, t, e) => h(o, t, e, "ReduceLogSumSquare", () => ["float t; value = 0.0;", "t = _A(inputIdx); value += t * t;", ""]) }, 7019: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.isReshapeCheap = n.processDims3D = n.createPackedReshape3DProgramInfoLoader = void 0; const u = a(2517), c = a(5060), f = a(2039), s = a(2827); n.createPackedReshape3DProgramInfoLoader = (h, p, l) => { const o = (t => ({ name: "Reshape (packed)", inputTypes: [f.TextureType.packed], inputNames: ["A"], cacheHint: `${t}` }))(l); return Object.assign(Object.assign({}, o), { get: () => ((t, e, r, i) => { const d = e.dims, g = i; let m = ""; for (let T = 0; T < 4; T++) { let w = ""; switch (T) { case 0: w = "outputCoords = rc;"; break; case 1: w = "outputCoords = ivec3(rc.x, rc.y+1, rc.z);"; break; case 2: w = "outputCoords = ivec3(rc.x, rc.y, rc.z+1);"; break; case 3: w = "outputCoords = ivec3(rc.x, rc.y+1, rc.z+1);"; break; default: throw new Error } m += ` ${w} ${T>0?"if(outputCoords.y < rows && outputCoords.z < cols){":""} int flattenedIndex = getFlattenedIndex(outputCoords); ivec3 inputRC = inputCoordsFromReshapedOutCoords(flattenedIndex); vec2 innerDims = vec2(float(inputRC.y),float(inputRC.z)); result[${T}] = getChannel(getA(inputRC.x, inputRC.y, inputRC.z), innerDims); ${T>0?"}":""} ` } const _ = (0, c.getGlsl)(t.session.backend.glContext.version), y = ` ${function(T){const w=u.ShapeUtil.computeStrides(T),S=["b","r","c"],O="index";return` ivec3 inputCoordsFromReshapedOutCoords(int index) { ${w.map((E,v)=>`int ${S[v]} = ${O} / ${E}; ${v===w.length-1?`int ${S[v+1]} = ${O} - 
${S[v]} * ${E}`:`index -= ${S[v]} * ${E}`};`).join("")} return ivec3(b, r, c); } `}(d)} ${function(T){const w=u.ShapeUtil.computeStrides(T);return` int getFlattenedIndex(ivec3 coords) { // reverse y, z order return coords.x * ${w[0]} + coords.z * ${w[1]} + coords.y; } `}(g)} ${(0,s.unpackFromChannel)()} void main() { ivec3 rc = getOutputCoords(); vec4 result = vec4(0.0); ivec3 outputCoords; int rows = ${g[2]}; int cols = ${g[1]}; ${m} ${_.output} = result; } `; return Object.assign(Object.assign({}, r), { output: { dims: g, type: e.type, textureType: f.TextureType.packed }, shaderSource: y, hasMain: !0 }) })(h, p, o, l) }) }, n.processDims3D = function(h) { if (h.length === 0) return [1, 1, 1]; let p = 1; for (let l = 0; l < h.length - 2; ++l) p *= h[l]; return [p, h.length > 1 ? h[h.length - 2] : 1, h[h.length - 1]] }, n.isReshapeCheap = function(h, p) { let l = !1; return l = h.length === 0 || p.length === 0 || (h.length < 2 || p.length < 2 ? h[h.length - 1] === p[p.length - 1] : h[h.length - 1] === p[p.length - 1] && h[h.length - 2] === p[p.length - 2]), l } }, 718: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.reshape = void 0; const u = a(2517); n.reshape = (c, f) => { const s = u.ShapeUtil.calculateReshapedDims(f[0].dims, f[1].integerData); return c.session.pack ? [c.reshapePacked(f[0], s)] : [c.reshapeUnpacked(f[0], s)] } }, 2268: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseResizeAttributesV11 = n.parseResizeAttributesV10 = n.resize = void 0; const u = a(5060), c = a(2039), f = a(9390), s = a(2827), h = a(9793), p = { name: "Resize", inputNames: ["A"], inputTypes: [c.TextureType.packed] }; n.resize = (r, i, d) => ((0, h.validateInputs)(i, d), [r.run(Object.assign(Object.assign({}, p), { cacheHint: d.cacheKey, get: () => l(r, i, d) }), i)]), n.parseResizeAttributesV10 = r => (0, h.parseUpsampleAttributes)(r, 10), n.parseResizeAttributesV11 = r => (0, h.parseUpsampleAttributes)(r, 11); const l = (r, i, d) => { const g = (0, u.getGlsl)(r.session.backend.glContext.version), [m, _] = o(i, d); if (m.every(k => k === 1) && d.coordinateTransformMode !== "tf_crop_and_resize") return Object.assign(Object.assign({}, p), { output: { dims: _, type: i[0].type, textureType: c.TextureType.packed }, hasMain: !0, shaderSource: `void main() { vec4 v = ${g.texture2D}(X, TexCoords); ${g.output} = v; }` }); const y = _.length; if (y < 2) throw new Error(`output dimension should be at least 2, but got ${y}`); const T = _[y - 2], w = _[y - 1], S = i[0].dims; if (y !== S.length) throw new Error(`output dimension should match input ${S.length}, but got ${y}`); const O = S[y - 2], E = S[y - 1], v = m[y - 2], P = m[y - 1]; let L = ""; if (d.mode !== "linear") throw new Error(`resize (packed) does not support mode: '${d.mode}'`); switch (d.coordinateTransformMode) { case "asymmetric": L = ` vec4 getSourceFracIndex(ivec4 coords) { return vec4(coords) / scaleWHWH; } `; break; case "half_pixel": L = ` vec4 getSourceFracIndex(ivec4 coords) { return (vec4(coords) + 0.5) / scaleWHWH - 0.5; } `; break; case "pytorch_half_pixel": L = ` vec4 getSourceFracIndex(ivec4 coords) { vec4 fcoords = vec4(coords); return vec4( ${w}.0 > 1.0 ? (fcoords.x + 0.5) / scaleWHWH.x - 0.5 : 0.0, ${T}.0 > 1.0 ? (fcoords.y + 0.5) / scaleWHWH.y - 0.5 : 0.0, ${w}.0 > 1.0 ? (fcoords.z + 0.5) / scaleWHWH.z - 0.5 : 0.0, ${T}.0 > 1.0 ? 
(fcoords.w + 0.5) / scaleWHWH.w - 0.5 : 0.0 ); } `; break; case "align_corners": L = ` vec4 getSourceFracIndex(ivec4 coords) { vec4 resized = vec4(${w}.0 - 1.0, ${T}.0 - 1.0, ${w}.0 - 1.0, ${T}.0 - 1.0); vec4 original = vec4(${E}.0 - 1.0, ${O}.0 - 1.0, ${E}.0 - 1.0, ${O}.0 - 1.0); vec4 new_scale = original / resized; return vec4(coords) * new_scale; } `; break; default: throw new Error(`resize (packed) does not support coordinateTransformMode: '${d.coordinateTransformMode}'`) } const V = (0, f.getCoordsDataType)(y), R = ` const vec2 inputWH = vec2(${O}.0, ${E}.0); const vec4 scaleWHWH = vec4(float(${v}), float(${P}), float(${v}), float(${P})); ${(0,s.unpackFromChannel)()} ${L} float getAValue(int x10, int r, int c, int d) { return getChannel(getA(x10, r, c, d), vec2(c, d)); } void main() { ${V} rc = getOutputCoords(); int batch = rc[0]; int depth = rc[1]; // retrieve the 4 coordinates that is used in the 4 packed output values. ivec4 coords = ivec4(rc.wz, rc.w + 1, rc.z + 1); // calculate the source index in fraction vec4 sourceFrac = getSourceFracIndex(coords); // get the lower and upper bound of the 4 values that will be packed into one texel. ivec4 x00 = ivec4(max(sourceFrac.xy, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.xy))); ivec4 x01 = ivec4(max(sourceFrac.xw, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.xw))); ivec4 x10 = ivec4(max(sourceFrac.zy, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.zy))); ivec4 x11 = ivec4(max(sourceFrac.zw, vec2(0.0)), min(inputWH - 1.0, ceil(sourceFrac.zw))); bool hasNextRow = rc.w < ${T-1}; bool hasNextCol = rc.z < ${w-1}; // pack x00, x01, x10, x11's top-left corner into one vec4 structure vec4 topLeft = vec4( getAValue(batch, depth, x00.x, x00.y), hasNextCol ? getAValue(batch, depth, x01.x, x01.y) : 0.0, hasNextRow ? getAValue(batch, depth, x10.x, x10.y) : 0.0, (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.x, x11.y) : 0.0); // pack x00, x01, x10, x11's top-right corner into one vec4 structure vec4 topRight = vec4( getAValue(batch, depth, x00.x, x00.w), hasNextCol ? getAValue(batch, depth, x01.x, x01.w) : 0.0, hasNextRow ? getAValue(batch, depth, x10.x, x10.w) : 0.0, (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.x, x11.w) : 0.0); // pack x00, x01, x10, x11's bottom-left corner into one vec4 structure vec4 bottomLeft = vec4( getAValue(batch, depth, x00.z, x00.y), hasNextCol ? getAValue(batch, depth, x01.z, x01.y) : 0.0, hasNextRow ? getAValue(batch, depth, x10.z, x10.y) : 0.0, (hasNextRow && hasNextCol) ? getAValue(batch, depth, x11.z, x11.y) : 0.0); // pack x00, x01, x10, x11's bottom-right corner into one vec4 structure vec4 bottomRight = vec4( getAValue(batch, depth, x00.z, x00.w), hasNextCol ? getAValue(batch, depth, x01.z, x01.w) : 0.0, hasNextRow ? getAValue(batch, depth, x10.z, x10.w) : 0.0, (hasNextRow && hasNextCol) ? 
getAValue(batch, depth, x11.z, x11.w) : 0.0); // calculate the interpolation fraction on u and v direction vec4 frac = vec4(sourceFrac) - floor(sourceFrac); vec4 clampFrac = clamp(frac, vec4(0.0), vec4(1.0)); vec4 top = mix(topLeft, topRight, clampFrac.ywyw); vec4 bottom = mix(bottomLeft, bottomRight, clampFrac.ywyw); vec4 newValue = mix(top, bottom, clampFrac.xxzz); ${g.output} = vec4(newValue); } `; return Object.assign(Object.assign({}, p), { output: { dims: _, type: i[0].type, textureType: c.TextureType.packed }, hasMain: !0, shaderSource: R }) }, o = (r, i) => { const d = r[0].dims; let g, m = i.scales; if (m.length === 0) { const y = r[i.scalesInputIdx]; if (y && y.size !== 0) { if (r[i.sizesInputIdx]) throw new Error("Only one of scales or sizes must be provided as input."); m = t(y, i.mode, i.isResize) } else { const T = r[i.sizesInputIdx]; if (!T || T.size === 0) throw new Error("Either scales or sizes MUST be provided as input."); g = Array.from(T.integerData), m = e(g, d, i.mode, i.isResize) } } else if (r[i.sizesInputIdx]) throw new Error("Only one of scales or sizes must be provided as input."); const _ = g || d.map((y, T) => Math.floor(y * m[T])); return [m, _] }, t = (r, i, d) => { const g = Array.from(r.floatData); return (0, h.scalesValidation)(g, i, d), g }, e = (r, i, d, g) => { const m = i.length, _ = new Array(m); for (let y = 0, T = m; y < T; y++) if (i[y] === 0) { if (r[y] !== 0) throw new Error("Input dim is zero but required output dim is non-zero."); _[y] = 1 } else _[y] = r[y] / i[y]; return (0, h.scalesValidation)(_, d, g), _ } }, 8117: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.shape = void 0; const u = a(9162); n.shape = (f, s) => (c(s), [new u.Tensor([s[0].dims.length], "int32", void 0, void 0, new Int32Array(s[0].dims))]); const c = f => { if (!f || f.length !== 1) throw new Error("Shape requires 1 input.") } }, 2278: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.sliceV10 = n.parseSliceAttributes = n.slice = void 0; const u = a(246), c = a(782), f = a(2517), s = a(2039), h = { name: "Slice", inputNames: ["A"], inputTypes: [s.TextureType.unpacked] }; n.slice = (e, r, i) => (l(r), [e.run(Object.assign(Object.assign({}, h), { cacheHint: i.cacheKey, get: () => p(e, r[0], i) }), r)]), n.parseSliceAttributes = e => { const r = e.attributes.getInts("starts"), i = e.attributes.getInts("ends"), d = e.attributes.getInts("axes", []); return (0, u.createAttributeWithCacheKey)({ starts: r, ends: i, axes: d }) }; const p = (e, r, i) => { const d = i.axes.length === 0 ? r.dims.slice(0).map((S, O) => O) : i.axes, g = f.ShapeUtil.normalizeAxes(d, r.dims.length), m = i.starts.map((S, O) => S > r.dims[g[O]] - 1 ? r.dims[g[O]] : f.ShapeUtil.normalizeAxis(S, r.dims[g[O]])), _ = i.ends.map((S, O) => S > r.dims[g[O]] - 1 ? 
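/*
 * Slice: starts and ends are clamped to each sliced dimension and negative
 * values are wrapped via ShapeUtil.normalizeAxis(); the shader then simply
 * shifts every output index by the corresponding start offset.
 */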
r.dims[g[O]] : f.ShapeUtil.normalizeAxis(S, r.dims[g[O]])), y = r.dims.slice(), T = []; for (let S = 0; S < g.length; S++) y[g[S]] = _[S] - m[S], m[S] > 0 && T.push(`outputIdx[${g[S]}] += ${m[S]};`); const w = ` float process(int outputIdx[${y.length}]) { ${T.join(` `)} return _A(outputIdx); }`; return Object.assign(Object.assign({}, h), { output: { dims: y, type: r.type, textureType: s.TextureType.unpacked }, shaderSource: w }) }, l = e => { if (!e || e.length !== 1) throw new Error("Slice requires 1 input."); if (c.NUMBER_TYPES.indexOf(e[0].type) === -1) throw new Error("Invalid input type.") }; n.sliceV10 = (e, r) => { t(r); const i = o(e, r); return [e.run(Object.assign(Object.assign({}, h), { cacheHint: i.cacheKey, get: () => p(e, r[0], i) }), [r[0]])] }; const o = (e, r) => { if (!e.session.isInitializer(r[1].dataId) || !e.session.isInitializer(r[2].dataId) || r.length >= 4 && !e.session.isInitializer(r[3].dataId) || r.length >= 5 && !e.session.isInitializer(r[4].dataId)) throw new Error("dynamic slice attributes are not allowed"); if (r.length >= 5 && r[4].integerData.some(m => m !== 1)) throw new Error("currently non-1 steps is not supported for Slice"); const i = Array.from(r[1].integerData), d = Array.from(r[2].integerData), g = r.length >= 4 ? Array.from(r[3].integerData) : []; return { starts: i, ends: d, axes: g, cacheKey: `${g};${i};${d}` } }, t = e => { if (!e || e.length < 3 || e.length > 5) throw new Error("Invalid input number."); if (e[1].type !== "int32" || e[1].dims.length !== 1) throw new Error("Invalid input type."); if (e[2].type !== "int32" || e[2].dims.length !== 1) throw new Error("Invalid input type."); if (e.length >= 4 && (e[3].type !== "int32" || e[3].dims.length !== 1)) throw new Error("Invalid input type."); if (e.length >= 5 && (e[4].type !== "int32" || e[4].dims.length !== 1)) throw new Error("Invalid input type.") } }, 5524: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.softmaxV13 = n.parseSoftmaxAttributesV13 = n.parseSoftmaxAttributes = n.softmax = void 0; const u = a(246), c = a(2517), f = a(5060), s = a(2039), h = a(3738), p = { name: "SoftmaxComputeMax", inputNames: ["A"], inputTypes: [s.TextureType.unpacked] }, l = { name: "SoftmaxComputeScale", inputNames: ["A", "Max"], inputTypes: [s.TextureType.unpacked, s.TextureType.unpacked] }, o = { name: "SoftMax", inputNames: ["A", "Max", "Norm"], inputTypes: [s.TextureType.unpacked, s.TextureType.unpacked, s.TextureType.unpacked] }; n.softmax = (g, m, _) => { d(m); const y = m[0].dims.slice(), T = c.ShapeUtil.normalizeAxis(_.axis, y.length), w = c.ShapeUtil.sizeToDimension(y, T), S = c.ShapeUtil.sizeFromDimension(y, T); return t(g, m, _, w, S) }, n.parseSoftmaxAttributes = g => (0, u.createAttributeWithCacheKey)({ axis: g.attributes.getInt("axis", 1) }), n.parseSoftmaxAttributesV13 = g => (0, u.createAttributeWithCacheKey)({ axis: g.attributes.getInt("axis", -1) }), n.softmaxV13 = (g, m, _) => { d(m); const y = m[0].dims.slice(), T = c.ShapeUtil.normalizeAxis(_.axis, y.length), w = y.length, S = T !== w - 1, O = []; let E, v = [], P = []; S && (v = Array.from({ length: w }).map((k, Y) => Y), v[T] = w - 1, v[w - 1] = T, v.map(k => O.push(y[k])), E = (0, u.createAttributeWithCacheKey)({ perm: v }), P = (0, h.transpose)(g, m, E)); const L = S ? c.ShapeUtil.sizeToDimension(O, w - 1) : c.ShapeUtil.sizeToDimension(y, w - 1), V = S ? c.ShapeUtil.sizeFromDimension(O, w - 1) : c.ShapeUtil.sizeFromDimension(y, w - 1), R = t(g, S ? P : m, _, L, V); return S ? 
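/*
 * Softmax-13: when the normalization axis is not the last dimension, the
 * input is transposed so that axis becomes innermost, softmax runs over the
 * trailing dimension, and the result is transposed back here.
 */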
(0, h.transpose)(g, R, E) : R }; const t = (g, m, _, y, T) => { const w = e(g, m[0], y, T, [y]), S = g.run(Object.assign(Object.assign({}, p), { cacheHint: _.cacheKey, get: () => w }), m), O = r(g, m[0], y, T, w.output.dims, [y]), E = g.run(Object.assign(Object.assign({}, l), { cacheHint: _.cacheKey, get: () => O }), [m[0], S]), v = i(g, m[0], y, T, w.output.dims, O.output.dims); return [g.run(Object.assign(Object.assign({}, o), { cacheHint: _.cacheKey, get: () => v }), [m[0], S, E])] }, e = (g, m, _, y, T) => { const [w, S] = g.calculateTextureWidthAndHeight(m.dims, s.TextureType.unpacked), O = T.length; if (_ < 1 || y < 1) throw new Error("Logical row count N and feature count D must be greater than or equal to 1"); if (T.length !== 1) throw new Error("Dimensionality of the output should be 1"); if (T[0] !== _) throw new Error("Shape of the output should be equal to logical row count"); const E = (0, f.getGlsl)(g.session.backend.glContext.version), v = ` float process(int[${O}] indices) { int logical_row_start_offset = indices[0] * ${y}; float max = getColorAsFloat(${E.texture2D}(A, offsetToCoords(logical_row_start_offset, ${w}, ${S} ))); for(int i=1; i<${y}; ++i) { float current = getColorAsFloat(${E.texture2D}(A, offsetToCoords(logical_row_start_offset + i, ${w}, ${S}))); if(current > max) max = current; } return max; }`; return Object.assign(Object.assign({}, p), { output: { dims: T, type: m.type, textureType: s.TextureType.unpacked }, shaderSource: v }) }, r = (g, m, _, y, T, w) => { const [S, O] = g.calculateTextureWidthAndHeight(m.dims, s.TextureType.unpacked), E = w.length; if (_ < 1 || y < 1) throw new Error("Logical row count N and feature count D must be greater than or equal to 1"); if (w.length !== 1) throw new Error("Dimensionality of the output should be 1"); if (w[0] !== _) throw new Error("Shape of the output should be equal to logical row count"); if (T.length !== 1) throw new Error("Dimensionality of the intermediate results should be 1"); if (T[0] !== _) throw new Error("Shape of the intermediate results should be equal to logical row count"); const v = ` float process(int[${E}] indices) { int logical_row_start_offset = indices[0] * ${y}; float norm_factor = 0.0; float max = _Max(indices); for(int i=0; i<${y}; ++i) { norm_factor += exp(getColorAsFloat(${(0,f.getGlsl)(g.session.backend.glContext.version).texture2D}(A, offsetToCoords(logical_row_start_offset + i, ${S}, ${O}))) - max); } return norm_factor; }`; return Object.assign(Object.assign({}, l), { output: { dims: w, type: m.type, textureType: s.TextureType.unpacked }, shaderSource: v }) }, i = (g, m, _, y, T, w) => { const [S, O] = g.calculateTextureWidthAndHeight(m.dims, s.TextureType.unpacked), E = m.dims.length; if (_ < 1 || y < 1) throw new Error("Logical row count N and feature count D must be greater than or equal to 1"); if (T.length !== 1 || w.length !== 1) throw new Error("Dimensionality of the intermediate results should be 1"); if (T[0] !== _ || w[0] !== _) throw new Error("Shape of the intermediate results should be equal to logical row count"); const v = ` float process(int[${E}] indices) { // get offset of current logical tensor index from the 2-D texture coordinates (TexCoords) int offset = coordsToOffset(TexCoords, ${S}, ${O}); //determine the logical row for this index int logical_row_index[1]; logical_row_index[0] = offset / ${y}; float norm_factor = _Norm(logical_row_index); // avoid possible division by 0 // if norm_facor is 0, all elements are zero // if so, return 0 if(norm_factor == 0.0) 
return 0.0; return exp(_A(indices) - _Max(logical_row_index)) / norm_factor; }`; return Object.assign(Object.assign({}, o), { output: { dims: m.dims, type: m.type, textureType: s.TextureType.unpacked }, shaderSource: v }) }, d = g => { if (!g || g.length !== 1) throw new Error("Softmax requires 1 input."); if (g[0].type !== "float32" && g[0].type !== "float64") throw new Error("Invalid input type") } }, 5975: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseSplitAttributes = n.split = void 0; const u = a(246), c = a(2517), f = a(2039), s = { name: "Split", inputNames: ["A"], inputTypes: [f.TextureType.unpacked] }; n.split = (o, t, e) => { l(t); const r = c.ShapeUtil.normalizeAxis(e.axis, t[0].dims.length), i = h(o, t, r, e), d = []; for (let g = 0; g < i; ++g) d.push(o.run(Object.assign(Object.assign({}, s), { cacheHint: `${e.cacheKey};${g}`, get: () => p(o, t[0], e, r, g) }), t)); return d }, n.parseSplitAttributes = o => { const t = o.attributes.getInt("axis", 0), e = o.attributes.getInts("split", []), r = o.outputs.length; return (0, u.createAttributeWithCacheKey)({ axis: t, split: e, numOutputs: r }) }; const h = (o, t, e, r) => { const [, i] = c.SplitUtil.splitShape(t[0].dims, e, r.split, r.numOutputs); return i.length }, p = (o, t, e, r, i) => { const [d, g] = c.SplitUtil.splitShape(t.dims, r, e.split, e.numOutputs), m = g[i], _ = d[i], y = ` float process(int indices[${_.length}]) { indices[${r}] += ${m}; return _A(indices); } `; return Object.assign(Object.assign({}, s), { cacheHint: `${e.cacheKey}:${i}`, output: { dims: _, type: t.type, textureType: f.TextureType.unpacked }, shaderSource: y }) }, l = o => { if (!o || o.length !== 1) throw new Error("Split requires one input."); if (o[0].type !== "int8" && o[0].type !== "uint8" && o[0].type !== "int16" && o[0].type !== "uint16" && o[0].type !== "int32" && o[0].type !== "uint32" && o[0].type !== "float32" && o[0].type !== "float64" && o[0].type !== "bool") throw new Error("Invalid input type.") } }, 3933: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseSqueezeAttributes = n.squeezeV13 = n.squeeze = void 0; const u = a(2517); n.squeeze = (s, h, p) => { c(h); const l = u.ShapeUtil.squeezeShape(h[0].dims, p); return [s.reshapeUnpacked(h[0], l)] }, n.squeezeV13 = (s, h) => (f(h), (0, n.squeeze)(s, [h[0]], Array.from(h[1].integerData))), n.parseSqueezeAttributes = s => s.attributes.getInts("axes"); const c = s => { if (!s || s.length !== 1) throw new Error("Squeeze requires 1 input."); if (s[0].type === "string") throw new Error("invalid input tensor types.") }, f = s => { if (!s || s.length !== 2) throw new Error("Squeeze requires 2 inputs."); if (s[1].type !== "int32") throw new Error("Invalid input type.") } }, 6558: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.sum = void 0; const u = a(5060), c = a(2039); n.sum = (h, p) => { s(p); const l = { name: "Sum", inputNames: p.map((o, t) => `X${t}`), inputTypes: new Array(p.length).fill(c.TextureType.unpacked) }; return [h.run(Object.assign(Object.assign({}, l), { get: () => f(h, p, l) }), p)] }; const f = (h, p, l) => { const o = (0, u.getGlsl)(h.session.backend.glContext.version), t = p[0].dims.slice(), e = ` void main() { vec4 result = ${p.map((r,i)=>`${o.texture2D}(X${i},TexCoords)`).join(" + ")}; ${o.output} = result; } `; return Object.assign(Object.assign({}, l), { output: { dims: t, type: p[0].type, textureType: c.TextureType.unpacked }, hasMain: !0, shaderSource: e }) }, s = h => { if (!h || h.length 
=== 0) throw new Error("Sum requires inputs."); const p = h[0].dims.length; for (let l = 1; l < h.length; l++) { if (p !== h[l].dims.length) throw new Error("Input shapes are mismatched."); for (let o = 0; o < p; o++) if (h[0].dims[o] !== h[l].dims[o]) throw new Error("Input shapes are not matched.") } if (h[0].type !== "float32" && h[0].type !== "float64") throw new Error("Invalid input type."); for (let l = 1; l < h.length; l++) if (h[0].type !== h[l].type) throw new Error("Input types are not matched.") } }, 5723: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.tile = void 0; const u = a(782), c = a(2039); n.tile = (h, p) => { s(p); const l = { name: "Tile", inputNames: ["A"], inputTypes: [c.TextureType.unpacked] }; return [h.run(Object.assign(Object.assign({}, l), { get: () => f(h, p, l) }), p)] }; const f = (h, p, l) => { const o = p[0].dims.slice(), t = new Array(o.length), e = []; for (let d = 0; d < o.length; d++) t[d] = o[d] * p[1].numberData[d], e.push(`inputIdx[${d}] = int(mod(float(outputIdx[${d}]), ${o[d]}.));`); const r = t.length, i = ` float process(int outputIdx[${r}]) { int inputIdx[${r}]; ${e.join(` `)} return _A(inputIdx); } `; return Object.assign(Object.assign({}, l), { output: { dims: t, type: p[0].type, textureType: c.TextureType.unpacked }, shaderSource: i }) }, s = h => { if (!h || h.length !== 2) throw new Error("Tile requires 2 inputs."); if (h[1].dims.length !== 1) throw new Error("The second input shape must be 1 dimension."); if (h[1].dims[0] !== h[0].dims.length) throw new Error("Invalid input shape."); if (u.NUMBER_TYPES.indexOf(h[0].type) === -1) throw new Error("Invalid input type."); if (h[1].type !== "int32" && h[1].type !== "int16") throw new Error("Invalid repeat type.") } }, 3738: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseTransposeAttributes = n.transpose = void 0; const u = a(246), c = a(2517), f = a(2039), s = { name: "Transpose", inputNames: ["A"], inputTypes: [f.TextureType.unpacked] }; n.transpose = (e, r, i) => (t(r), [e.run(Object.assign(Object.assign({}, s), { cacheHint: i.cacheKey, get: () => h(e, r[0], i.perm) }), r)]), n.parseTransposeAttributes = e => (0, u.createAttributeWithCacheKey)({ perm: e.attributes.getInts("perm", []) }); const h = (e, r, i) => { const d = r.dims; i = p(d, i); const g = l(d, i), m = d.length, _ = ` ${o("perm",i,m)} float process(int indices[${m}]) { int a[${m}]; perm(a, indices); return _A(a); }`; return Object.assign(Object.assign({}, s), { output: { dims: g, type: r.type, textureType: f.TextureType.unpacked }, shaderSource: _ }) }, p = (e, r) => (r && r.length !== e.length && (r = [...e.keys()].reverse()), r), l = (e, r) => (r = p(e, r), c.ShapeUtil.sortBasedOnPerm(e, r)), o = (e, r, i) => { const d = []; d.push(`void ${e}(out int a[${i}], int src[${i}]) {`); for (let g = 0; g < i; ++g) d.push(` a[${r[g]}]=src[${g}];`); return d.push(" }"), d.join(` `) }, t = e => { if (!e || e.length !== 1) throw new Error("Transpose requires 1 input."); if (e[0].type !== "float32" && e[0].type !== "float64") throw new Error("input should be float tensor") } }, 8710: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.encodeAsUint8 = void 0; const u = a(5060), c = a(2039); n.encodeAsUint8 = (f, s) => { const h = s.shape, p = (0, u.getGlsl)(f.session.backend.glContext.version), l = ` const float FLOAT_MAX = 1.70141184e38; const float FLOAT_MIN = 1.17549435e-38; bool isNaN(float val) { return (val < 1.0 || 0.0 < val || val == 0.0) ?
false : true; } highp vec4 encodeAsUint8(highp float v) { if (isNaN(v)) { return vec4(255, 255, 255, 255); } highp float av = abs(v); if(av < FLOAT_MIN) { return vec4(0.0, 0.0, 0.0, 0.0); } else if(v > FLOAT_MAX) { return vec4(0.0, 0.0, 128.0, 127.0) / 255.0; } else if(v < -FLOAT_MAX) { return vec4(0.0, 0.0, 128.0, 255.0) / 255.0; } highp vec4 c = vec4(0,0,0,0); highp float e = floor(log2(av)); highp float m = exp2(fract(log2(av))) - 1.0; c[2] = floor(128.0 * m); m -= c[2] / 128.0; c[1] = floor(32768.0 * m); m -= c[1] / 32768.0; c[0] = floor(8388608.0 * m); highp float ebias = e + 127.0; c[3] = floor(ebias / 2.0); ebias -= c[3] * 2.0; c[2] += floor(ebias) * 128.0; c[3] += 128.0 * step(0.0, -v); return c / 255.0; } void main() { float value = ${p.texture2D}(X,TexCoords).r; ${p.output} = encodeAsUint8(value); }`, o = { name: "Uint8Encode", inputTypes: [c.TextureType.unpacked], inputNames: ["X"], output: { dims: h, type: s.tensor.type, textureType: c.TextureType.downloadUint8AsFloat }, shaderSource: l, hasMain: !0 }; return f.executeProgram(o, [s.tensor]) } }, 4909: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.tanh = n.tan = n.sqrt = n.sin = n.sigmoid = n.relu = n.not = n.neg = n.log = n.parseLeakyReluAttributes = n.leakyRelu = n.identity = n.floor = n.exp = n.parseEluAttributes = n.elu = n.cos = n.ceil = n.clipV11 = n.parseClipAttributes = n.clip = n.atan = n.asin = n.acos = n.abs = n.glslTanh = n.glslTan = n.glslSqrt = n.glslSigmoid = n.glslRelu = n.glslSin = n.glslNot = n.glslNeg = n.glslLog = n.glslLeakyRelu = n.glslIdentity = n.glslClip = n.glslFloor = n.glslExp = n.glslElu = n.glslCos = n.glslCeil = n.glslAtan = n.glslAsin = n.glslAcos = n.glslAbs = void 0; const u = a(246), c = a(2517), f = a(8520), s = a(5060), h = a(2039); function p() { return R("abs") } function l() { return R("acos") } function o() { return R("asin") } function t() { return R("atan") } function e() { return R("ceil") } function r() { return R("cos") } function i(C) { const $ = "elu"; return { body: ` const float alpha = float(${C}); float ${$}_(float a) { return a >= 0.0 ? a: (exp(a) - 1.0) * alpha; } vec4 ${$}_(vec4 v) { return vec4(${$}_(v.x), ${$}_(v.y), ${$}_(v.z), ${$}_(v.w)); } `, name: $, type: f.FunctionType.ValueBased } } function d() { return R("exp") } function g() { return R("floor") } function m(C, $) { const X = "clip"; return { body: ` const float min = float(${C}); const float max = float(${$}); float ${X}_(float a) { return clamp(a, min, max); } vec4 ${X}_(vec4 v) { return clamp(v, min, max); } `, name: X, type: f.FunctionType.ValueBased } } function _() { const C = "identity"; return { body: ` float ${C}_(float a) { return a; } vec4 ${C}_(vec4 v) { return v; } `, name: C, type: f.FunctionType.ValueBased } } function y(C) { const $ = "leakyRelu"; return { body: ` const float alpha = float(${C}); float ${$}_(float a) { return a < 0.0 ? a * alpha : a; } vec4 ${$}_(vec4 v) { return vec4(${$}_(v.x), ${$}_(v.y), ${$}_(v.z), ${$}_(v.w)); } `, name: $, type: f.FunctionType.ValueBased } } function T() { return R("log") } function w() { const C = "neg"; return { body: ` float ${C}_(float a) { return -a; } vec4 ${C}_(vec4 v) { return -v; } `, name: C, type: f.FunctionType.ValueBased } } function S() { const C = "not"; return { body: ` float ${C}_(float a) { return float( ! 
bool(a) ); } bool ${C}_(bool a) { return !a; } vec4 ${C}_(vec4 v) { return vec4(!bool(v.x), !bool(v.y), !bool(v.z), !bool(v.w)); } bvec4 ${C}_(bvec4 v) { return bvec4(!v.x, !v.y, !v.z, !v.w); } `, name: C, type: f.FunctionType.ValueBased } } function O() { return R("sin") } function E() { const C = "relu"; return { body: ` float ${C}_(float a) { return max( a, 0.0 ); } vec4 ${C}_(vec4 v) { return max( v, 0.0 ); } `, name: C, type: f.FunctionType.ValueBased } } function v() { const C = "sigmoid"; return { body: ` float ${C}_(float a) { return 1.0 / (1.0 + exp(-a)); } vec4 ${C}_(vec4 v) { return 1.0 / (1.0 + exp(-v)); } `, name: C, type: f.FunctionType.ValueBased } } function P() { return R("sqrt") } function L() { return R("tan") } function V() { const C = "tanh"; return { body: ` float ${C}_(float a) { a = clamp(a, -10., 10.); a = exp(2.*a); return (a - 1.) / (a + 1.); } vec4 ${C}_(vec4 v) { v = clamp(v, -10., 10.); v = exp(2.*v); return (v - 1.) / (v + 1.); } `, name: C, type: f.FunctionType.ValueBased } } function R(C) { return { body: ` float ${C}_(float a) { return ${C}(a); } vec4 ${C}_(vec4 v) { return ${C}(v); } `, name: C, type: f.FunctionType.ValueBased } } n.glslAbs = p, n.glslAcos = l, n.glslAsin = o, n.glslAtan = t, n.glslCeil = e, n.glslCos = r, n.glslElu = i, n.glslExp = d, n.glslFloor = g, n.glslClip = m, n.glslIdentity = _, n.glslLeakyRelu = y, n.glslLog = T, n.glslNeg = w, n.glslNot = S, n.glslSin = O, n.glslRelu = E, n.glslSigmoid = v, n.glslSqrt = P, n.glslTan = L, n.glslTanh = V; const k = (C, $, X, z) => { const Z = C.session.pack ? h.TextureType.packed : h.TextureType.unpacked, J = { name: X.name, inputTypes: [Z], inputNames: ["A"], cacheHint: z }; return Object.assign(Object.assign({}, J), { get: () => ((ue, Se, Te, se) => { const ye = ue.session.pack ? h.TextureType.packed : h.TextureType.unpacked, be = (0, s.getGlsl)(ue.session.backend.glContext.version); return Object.assign(Object.assign({}, Se), { output: { dims: Te.dims, type: Te.type, textureType: ye }, shaderSource: ` ${se.body} void main() { vec4 v = ${be.texture2D}(A, TexCoords); v = ${se.name}_(v); ${be.output} = v; } `, hasMain: !0 }) })(C, J, $, X) }) }; n.abs = (C, $) => [C.run(k(C, $[0], p()), $)], n.acos = (C, $) => [C.run(k(C, $[0], l()), $)], n.asin = (C, $) => [C.run(k(C, $[0], o()), $)], n.atan = (C, $) => [C.run(k(C, $[0], t()), $)], n.clip = (C, $, X) => [C.run(k(C, $[0], m(X.min, X.max), X.cacheKey), $)], n.parseClipAttributes = C => (0, u.createAttributeWithCacheKey)({ min: C.attributes.getFloat("min", c.MIN_CLIP), max: C.attributes.getFloat("max", c.MAX_CLIP) }), n.clipV11 = (C, $) => { const X = Y(C, $); return (0, n.clip)(C, [$[0]], X) }; const Y = (C, $) => { if ($.length >= 3 && (!C.session.isInitializer($[1].dataId) || !C.session.isInitializer($[2].dataId))) throw new Error("dynamic clip attributes are not allowed"); const X = $.length >= 3 ? $[1].numberData[0] : c.MIN_CLIP, z = $.length >= 3 ? 
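/*
 * Clip-11: min and max arrive as optional tensor inputs rather than
 * attributes; they must be graph initializers (checked just above) and fall
 * back to MIN_CLIP / MAX_CLIP when fewer than three inputs are given.
 */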
$[2].numberData[0] : c.MAX_CLIP; return (0, u.createAttributeWithCacheKey)({ min: X, max: z }) }; n.ceil = (C, $) => [C.run(k(C, $[0], e()), $)], n.cos = (C, $) => [C.run(k(C, $[0], r()), $)], n.elu = (C, $, X) => [C.run(k(C, $[0], i(X.alpha), X.cacheKey), $)], n.parseEluAttributes = C => (0, u.createAttributeWithCacheKey)({ alpha: C.attributes.getFloat("alpha", 1) }), n.exp = (C, $) => [C.run(k(C, $[0], d()), $)], n.floor = (C, $) => [C.run(k(C, $[0], g()), $)], n.identity = (C, $) => [C.run(k(C, $[0], _()), $)], n.leakyRelu = (C, $, X) => [C.run(k(C, $[0], y(X.alpha), X.cacheKey), $)], n.parseLeakyReluAttributes = C => (0, u.createAttributeWithCacheKey)({ alpha: C.attributes.getFloat("alpha", .01) }), n.log = (C, $) => [C.run(k(C, $[0], T()), $)], n.neg = (C, $) => [C.run(k(C, $[0], w()), $)], n.not = (C, $) => [C.run(k(C, $[0], S()), $)], n.relu = (C, $) => [C.run(k(C, $[0], E()), $)], n.sigmoid = (C, $) => [C.run(k(C, $[0], v()), $)], n.sin = (C, $) => [C.run(k(C, $[0], O()), $)], n.sqrt = (C, $) => [C.run(k(C, $[0], P()), $)], n.tan = (C, $) => [C.run(k(C, $[0], L()), $)], n.tanh = (C, $) => [C.run(k(C, $[0], V()), $)] }, 5611: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createUnpackProgramInfoLoader = n.createUnpackProgramInfo = void 0; const u = a(5060), c = a(2039), f = a(9390), s = a(2827), h = { name: "unpack", inputNames: ["A"], inputTypes: [c.TextureType.packed] }; n.createUnpackProgramInfo = (p, l) => { const o = l.dims.length, t = (0, s.getChannels)("rc", o), e = t.slice(-2), r = (0, f.getCoordsDataType)(o), i = (0, s.unpackFromChannel)(), d = l.dims.length === 0 ? "" : function(_, y) { if (_ === 1) return "rc"; let T = ""; for (let w = 0; w < _; w++) T += y[w], w < _ - 1 && (T += ","); return T }(o, t), g = o <= 1 ? "rc" : `vec2(${e.join(",")})`, m = ` ${i} void main() { ${r} rc = getOutputCoords(); // Sample the texture with the coords to get the rgba channel value. 
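// getChannel then picks the single scalar for this output texel out of the packed RGBA value, based on the parity of the last two output coordinates (see unpackFromChannel).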
vec4 packedInput = getA(${d}); ${(0,u.getGlsl)(p.session.backend.glContext.version).output} = vec4(getChannel(packedInput, ${g}), 0, 0, 0); } `; return Object.assign(Object.assign({}, h), { hasMain: !0, output: { dims: l.dims, type: l.type, textureType: c.TextureType.unpacked }, shaderSource: m }) }, n.createUnpackProgramInfoLoader = (p, l) => Object.assign(Object.assign({}, h), { get: () => (0, n.createUnpackProgramInfo)(p, l) }) }, 8428: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.parseUnsqueezeAttributes = n.unsqueezeV13 = n.unsqueeze = void 0; const u = a(2517); n.unsqueeze = (s, h, p) => { c(h); const l = u.ShapeUtil.unsqueezeShape(h[0].dims, p); return [s.reshapeUnpacked(h[0], l)] }, n.unsqueezeV13 = (s, h) => (f(h), (0, n.unsqueeze)(s, [h[0]], Array.from(h[1].integerData))), n.parseUnsqueezeAttributes = s => s.attributes.getInts("axes"); const c = s => { if (!s || s.length !== 1) throw new Error("Unsqueeze requires 1 input."); if (s[0].type === "string") throw new Error("invalid input tensor types.") }, f = s => { if (!s || s.length !== 2) throw new Error("Unsqueeze requires 2 inputs."); if (s[1].type !== "int32") throw new Error("Invalid input type.") } }, 9793: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.scalesValidation = n.validateInputs = n.parseUpsampleAttributes = n.parseUpsampleAttributesV9 = n.parseUpsampleAttributesV7 = n.upsample = void 0; const u = a(246), c = a(5060), f = a(2039), s = { name: "Upsample", inputNames: ["X"], inputTypes: [f.TextureType.unpacked] }; n.upsample = (p, l, o) => ((0, n.validateInputs)(l, o), [p.run(Object.assign(Object.assign({}, s), { cacheHint: o.cacheKey, get: () => h(p, l, o) }), l)]), n.parseUpsampleAttributesV7 = p => (0, n.parseUpsampleAttributes)(p, 7), n.parseUpsampleAttributesV9 = p => (0, n.parseUpsampleAttributes)(p, 9), n.parseUpsampleAttributes = (p, l) => { const o = l >= 10, t = p.attributes.getString("mode", "nearest"); if (t !== "nearest" && t !== "linear" && (l < 11 || t !== "cubic")) throw new Error(`unrecognized mode: ${t}`); let e = []; l < 9 && (e = p.attributes.getFloats("scales"), (0, n.scalesValidation)(e, t, o)); const r = p.attributes.getFloat("extrapolation_value", 0), i = l > 10 ? p.attributes.getString("coordinate_transformation_mode", "half_pixel") : "asymmetric"; if (["asymmetric", "pytorch_half_pixel", "tf_half_pixel_for_nn", "align_corners", "tf_crop_and_resize", "half_pixel"].indexOf(i) === -1) throw new Error(`coordinate_transform_mode '${i}' is not supported`); const d = i === "tf_crop_and_resize", g = d, m = t === "nearest" && l >= 11 ? p.attributes.getString("nearest_mode", "round_prefer_floor") : ""; if (["round_prefer_floor", "round_prefer_ceil", "floor", "ceil", ""].indexOf(m) === -1) throw new Error(`nearest_mode '${m}' is not supported`); const _ = p.attributes.getFloat("cubic_coeff_a", -.75), y = p.attributes.getInt("exclude_outside", 0) !== 0; if (y && t !== "cubic") throw new Error("exclude_outside can be set to 1 only when mode is CUBIC."); const T = l < 11 || t === "nearest" && i === "asymmetric" && m === "floor"; let w = 0, S = 0, O = 0; return l > 10 ? p.inputs.length > 2 ? 
(w = 1, S = 2, O = 3) : (S = 1, O = 2) : l === 9 && (S = 1), (0, u.createAttributeWithCacheKey)({ opset: l, isResize: o, mode: t, scales: e, extrapolationValue: r, coordinateTransformMode: i, useExtrapolation: g, needRoiInput: d, nearestMode: m, cubicCoefficientA: _, excludeOutside: y, useNearest2xOptimization: T, roiInputIdx: w, scalesInputIdx: S, sizesInputIdx: O }) }; const h = (p, l, o) => { const t = (0, c.getGlsl)(p.session.backend.glContext.version), [e, r] = p.calculateTextureWidthAndHeight(l[0].dims, f.TextureType.unpacked), i = l[0].dims.map((O, E) => Math.floor(O * o.scales[E])), [d, g] = p.calculateTextureWidthAndHeight(i, f.TextureType.unpacked), m = i.length, _ = new Array(m), y = new Array(m); let T = ` int output_pitches[${m}]; int input_pitches[${m}]; `; for (let O = m - 1; O >= 0; O--) _[O] = O === m - 1 ? 1 : _[O + 1] * i[O + 1], y[O] = O === m - 1 ? 1 : y[O + 1] * l[0].dims[O + 1], T += ` output_pitches[${O}] = ${_[O]}; input_pitches[${O}] = ${y[O]}; `; const w = ` float getInputFloat(int index) { vec2 coords = offsetToCoords(index, ${e}, ${r}); float value = getColorAsFloat(${t.texture2D}(X, coords)); return value; } `, S = o.mode === "nearest" ? ` ${w} float process(int indices[${m}]) { int input_index = 0; int output_index = coordsToOffset(TexCoords, ${d}, ${g}); ${T} int d, m; for (int dim = 0; dim < ${m}; ++dim) { d = output_index / output_pitches[dim]; m = output_index - d * output_pitches[dim]; output_index = m; if (scales[dim] != 1 && d > 0) { int d2 = d / scales[dim]; m = d - d2 * scales[dim]; d = d2; } input_index += input_pitches[dim] * d; } return getInputFloat(input_index); }` : m === 4 ? ` ${w} float process(int indices[4]) { int input_index = 0; int output_index = coordsToOffset(TexCoords, ${d}, ${g}); ${T} int m; int index_of_dim0, index_of_dim1, index_of_dim2, index_of_dim3; index_of_dim0 = output_index / output_pitches[0]; m = output_index - index_of_dim0 * output_pitches[0]; index_of_dim1 = m / output_pitches[1]; m = m - index_of_dim1 * output_pitches[1]; index_of_dim2 = m / output_pitches[2]; m = m - index_of_dim2 * output_pitches[2]; index_of_dim3 = m; int index_of_input_dim2, index_of_input_dim3, x_offset, y_offset; index_of_input_dim2 = index_of_dim2 / scales[2]; y_offset = index_of_dim2 - index_of_input_dim2 * scales[2]; index_of_input_dim3 = index_of_dim3 / scales[3]; x_offset = index_of_dim3 - index_of_input_dim3 * scales[3]; input_index = index_of_dim0 * input_pitches[0] + index_of_dim1 * input_pitches[1] + index_of_input_dim2 * input_pitches[2] + index_of_input_dim3; float x00 = getInputFloat(input_index); float x10, x01, x11; bool end_of_dim2 = false; if (index_of_input_dim2 == (${l[0].dims[2]} - 1)) { // It's the end in dimension 2 x01 = x00; end_of_dim2 = true; } else { x01 = getInputFloat(input_index + input_pitches[2]); } if (index_of_input_dim3 == (input_pitches[2] - 1)) { // It's the end in dimension 3 x10 = x00; x11 = x01; } else { x10 = getInputFloat(input_index + 1); x11 = end_of_dim2 ? 
x10 : getInputFloat(input_index + input_pitches[2] + 1); } float y0 = x00 + float(y_offset) * (x01 - x00) / float(scales[2]); float y1 = x10 + float(y_offset) * (x11 - x10) / float(scales[2]); return y0 + float(x_offset) * (y1 - y0) / float(scales[3]); }` : ` ${w} float process(int indices[2]) { int input_index = 0; int output_index = coordsToOffset(TexCoords, ${d}, ${g}); ${T} int m; int index_of_dim0, index_of_dim1; index_of_dim0 = output_index / output_pitches[0]; m = output_index - index_of_dim0 * output_pitches[0]; index_of_dim1 = m; int index_of_input_dim0, index_of_input_dim1, x_offset, y_offset; index_of_input_dim0 = index_of_dim0 / scales[0]; y_offset = index_of_dim0 - index_of_input_dim0 * scales[0]; index_of_input_dim1 = index_of_dim1 / scales[1]; x_offset = index_of_dim1 - index_of_input_dim1 * scales[1]; input_index = index_of_input_dim0 * input_pitches[0] + index_of_input_dim1; float x00 = getInputFloat(input_index); float x10, x01, x11; bool end_of_dim0 = false; if (index_of_input_dim0 == (${l[0].dims[0]} - 1)) { // It's the end in dimension 0 x01 = x00; end_of_dim0 = true; } else { x01 = getInputFloat(input_index + input_pitches[0]); } if (index_of_input_dim1 == (input_pitches[0] - 1)) { // It's the end in dimension 1 x10 = x00; x11 = x01; } else { x10 = getInputFloat(input_index + 1); x11 = end_of_dim0 ? x10 : getInputFloat(input_index + input_pitches[0] + 1); } float y0 = x00 + float(y_offset) * (x01 - x00) / float(scales[0]); float y1 = x10 + float(y_offset) * (x11 - x10) / float(scales[0]); return y0 + float(x_offset) * (y1 - y0) / float(scales[1]); }`; return Object.assign(Object.assign({}, s), { output: { dims: i, type: l[0].type, textureType: f.TextureType.unpacked }, shaderSource: S, variables: [{ name: "scales", type: "int", arrayLength: o.scales.length, data: o.scales.map(O => Math.ceil(O)) }] }) }; n.validateInputs = (p, l) => { if (!p || l.opset < 9 && p.length !== 1 || l.opset >= 9 && l.opset < 11 && p.length !== 2 || l.opset >= 11 && p.length < 2) throw new Error("invalid inputs."); if (l.scales.length > 0 && p[0].dims.length !== l.scales.length) throw new Error("Invalid input shape."); if (p[0].type === "string") throw new Error("Invalid input tensor types.") }, n.scalesValidation = (p, l, o) => { if (o) { for (const t of p) if (t <= 0) throw new Error("Scale value should be greater than 0.") } else for (const t of p) if (t < 1) throw new Error("Scale value should be greater than or equal to 1."); if (!(l !== "linear" && l !== "cubic" || p.length === 2 || p.length === 4 && p[0] === 1 && p[1] === 1)) throw new Error(`'Linear' mode and 'Cubic' mode only support 2-D inputs ('Bilinear', 'Bicubic') or 4-D inputs with the corresponding outermost 2 scale values being 1 in the ${o?"Resize":"Upsample"} opeartor.`) } }, 1958: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.ProgramManager = void 0; const u = a(1670), c = a(6231), f = a(8879), s = a(5060); n.ProgramManager = class { constructor(h, p, l) { this.profiler = h, this.glContext = p, this.textureLayoutStrategy = l, this.repo = new Map, this.attributesBound = !1 } getArtifact(h) { return this.repo.get(h) } setArtifact(h, p) { this.repo.set(h, p) } run(h, p, l) { var o; this.profiler.event("op", `ProgramManager.run ${(o=h.programInfo.name)!==null&&o!==void 0?o:"unknown kernel"}`, () => { var t; const e = this.glContext.gl, r = h.program; e.useProgram(r); try { this.bindOutput(l), this.attributesBound || this.bindAttributes(h.attribLocations), this.bindUniforms(h.uniformLocations, (t = 
h.programInfo.variables) !== null && t !== void 0 ? t : [], p) } catch (i) { throw c.Logger.error("ProgramManager", h.programInfo.shaderSource), i } this.profiler.event("backend", "GlContext.draw()", () => { this.glContext.draw() }) }, this.glContext) } dispose() { this.vertexShader && this.glContext.deleteShader(this.vertexShader), this.repo.forEach(h => this.glContext.deleteProgram(h.program)) } build(h, p, l) { return this.profiler.event("backend", "ProgramManager.build", () => { const o = new f.GlslPreprocessor(this.glContext, h, p, l), t = o.preprocess(), e = this.compile(t); return { programInfo: h, program: e, uniformLocations: this.getUniformLocations(e, o.context.programInfo.inputNames, o.context.programInfo.variables), attribLocations: this.getAttribLocations(e) } }) } compile(h) { if (!this.vertexShader) { c.Logger.verbose("ProrgramManager", "Compiling and caching Vertex shader for the first time"); const o = (0, s.getVertexShaderSource)(this.glContext.version); this.vertexShader = this.glContext.compileShader(o, this.glContext.gl.VERTEX_SHADER) } u.env.debug && c.Logger.verbose("ProrgramManager", `FragShader: ${h} `); const p = this.glContext.compileShader(h, this.glContext.gl.FRAGMENT_SHADER), l = this.glContext.createProgram(this.vertexShader, p); return this.glContext.deleteShader(p), l } bindOutput(h) { const p = h.width, l = h.height; c.Logger.verbose("ProrgramManager", `Binding output texture to Framebuffer: w/h=${p}/${l}, shape=${h.shape}, type=${h.tensor.type}`), this.glContext.attachFramebuffer(h.texture, p, l) } bindAttributes(h) { const p = h.position, l = h.textureCoord; this.glContext.setVertexAttributes(p, l), this.attributesBound = !0 } bindUniforms(h, p, l) { var o; const t = this.glContext.gl; let e = 0; for (const { name: r, type: i, location: d, arrayLength: g } of h) { const m = (o = p.find(_ => _.name === r)) === null || o === void 0 ? void 0 : o.data; if (i !== "sampler2D" && !m) throw new Error(`variable '${r}' does not have data defined in program info`); switch (i) { case "sampler2D": this.bindTexture(l[e], d, e), e++; break; case "float": g ? t.uniform1fv(d, m) : t.uniform1f(d, m); break; case "int": g ? 
t.uniform1iv(d, m) : t.uniform1i(d, m); break; default: throw new Error(`Uniform not implemented: ${i}`) } } } bindTexture(h, p, l) { this.glContext.bindTextureToUniform(h.texture, l, p) } getAttribLocations(h) { return { position: this.getAttribLocation(h, "position"), textureCoord: this.getAttribLocation(h, "textureCoord") } } getUniformLocations(h, p, l) { const o = []; if (p) for (const t of p) o.push({ name: t, type: "sampler2D", location: this.getUniformLocation(h, t) }); if (l) for (const t of l) o.push(Object.assign(Object.assign({}, t), { location: this.getUniformLocation(h, t.name) })); return o } getUniformLocation(h, p) { const l = this.glContext.gl.getUniformLocation(h, p); if (l === null) throw new Error(`Uniform ${p} not found.`); return l } getAttribLocation(h, p) { return this.glContext.gl.getAttribLocation(h, p) } } }, 6416: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.WebGLSessionHandler = void 0; const u = a(6231), c = a(1047), f = a(8316), s = a(1640), h = a(1958), p = a(7859), l = a(5702); n.WebGLSessionHandler = class { constructor(o, t) { this.backend = o, this.context = t, this.layoutStrategy = new p.PreferLogicalStrategy(o.glContext.maxTextureSize), this.programManager = new h.ProgramManager(this.context.profiler, o.glContext, this.layoutStrategy), this.textureManager = new l.TextureManager(o.glContext, this.layoutStrategy, this.context.profiler, { reuseTextures: o.textureCacheMode === "full" }), this.packedTextureDataCache = new Map, this.unpackedTextureDataCache = new Map, this.pack = o.pack, this.pack2unpackMap = new Map, this.unpack2packMap = new Map } createInferenceHandler() { return new f.WebGLInferenceHandler(this) } onGraphInitialized(o) { const t = o.getValues().filter(e => e.from === -1 && e.tensor).map(e => e.tensor.dataId); this.initializers = new Set(t) } isInitializer(o) { return !!this.initializers && this.initializers.has(o) } addInitializer(o) { this.initializers.add(o) } getTextureData(o, t) { return t ? this.packedTextureDataCache.get(o) : this.unpackedTextureDataCache.get(o) } setTextureData(o, t, e = !1) { u.Logger.verbose("WebGLSessionHandler", "Storing Texture data in cache"), e ? this.packedTextureDataCache.set(o, t) : this.unpackedTextureDataCache.set(o, t) } dispose() { this.programManager.dispose(), this.textureManager.clearActiveTextures(), this.packedTextureDataCache.forEach(o => this.textureManager.releaseTexture(o, !0)), this.packedTextureDataCache = new Map, this.unpackedTextureDataCache.forEach(o => this.textureManager.releaseTexture(o, !0)), this.unpackedTextureDataCache = new Map } resolve(o, t, e) { const r = (0, c.resolveOperator)(o, t, s.WEBGL_OP_RESOLVE_RULES); return { impl: r.opImpl, context: r.opInit ? r.opInit(o, e) : o } } } }, 7769: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.Uint8DataEncoder = n.RGBAFloatDataEncoder = n.RedFloat32DataEncoder = void 0; const u = a(6231); n.RedFloat32DataEncoder = class { constructor(c, f = 1) { if (f === 1) this.internalFormat = c.R32F, this.format = c.RED, this.textureType = c.FLOAT, this.channelSize = f; else { if (f !== 4) throw new Error(`Invalid number of channels: ${f}`); this.internalFormat = c.RGBA32F, this.format = c.RGBA, this.textureType = c.FLOAT, this.channelSize = f } } encode(c, f) { let s, h; return c.constructor !== Float32Array && (u.Logger.warning("Encoder", "data was not of type Float32; creating new Float32Array"), h = new Float32Array(c)), f * this.channelSize > c.length ? 
(u.Logger.warning("Encoder", "Source data too small. Allocating larger array"), h = c, s = this.allocate(f * this.channelSize), h.forEach((p, l) => s[l] = p)) : (h = c, s = h), s } allocate(c) { return new Float32Array(4 * c) } decode(c, f) { return this.channelSize === 1 ? c.filter((s, h) => h % 4 == 0).subarray(0, f) : c.subarray(0, f) } }, n.RGBAFloatDataEncoder = class { constructor(c, f = 1, s) { if (f !== 1 && f !== 4) throw new Error(`Invalid number of channels: ${f}`); this.internalFormat = c.RGBA, this.format = c.RGBA, this.channelSize = f, this.textureType = s || c.FLOAT } encode(c, f) { let s = c; return this.channelSize === 1 && (u.Logger.verbose("Encoder", "Exploding into a larger array"), s = this.allocate(f), c.forEach((h, p) => s[4 * p] = h)), s } allocate(c) { return new Float32Array(4 * c) } decode(c, f) { return this.channelSize === 1 ? c.filter((s, h) => h % 4 == 0).subarray(0, f) : c.subarray(0, f) } }, n.Uint8DataEncoder = class { constructor(c, f = 1) { if (this.channelSize = 4, f === 1) this.internalFormat = c.ALPHA, this.format = c.ALPHA, this.textureType = c.UNSIGNED_BYTE, this.channelSize = f; else { if (f !== 4) throw new Error(`Invalid number of channels: ${f}`); this.internalFormat = c.RGBA, this.format = c.RGBA, this.textureType = c.UNSIGNED_BYTE, this.channelSize = f } } encode(c, f) { return new Uint8Array(c.buffer, c.byteOffset, c.byteLength) } allocate(c) { return new Uint8Array(c * this.channelSize) } decode(c, f) { if (c instanceof Uint8Array) return c.subarray(0, f); throw new Error(`Invalid array type: ${c.constructor}`) } } }, 7859: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.getBatchDim = n.sizeToSquarishShape = n.getRowsCols = n.sizeFromShape = n.isInt = n.parseAxisParam = n.squeezeShape = n.PreferLogicalStrategy = n.AlwaysKeepOriginalSizeStrategy = void 0; const u = a(6231), c = a(2517); function f(o, t) { const e = [], r = [], i = t != null && Array.isArray(t) && t.length === 0, d = t == null || i ? null : s(t, o).sort(); let g = 0; for (let m = 0; m < o.length; ++m) { if (d != null) { if (d[g] === m && o[m] !== 1) throw new Error(`Can't squeeze axis ${m} since its dim '${o[m]}' is not 1`); (d[g] == null || d[g] > m) && o[m] === 1 && (e.push(o[m]), r.push(m)), d[g] <= m && g++ } o[m] !== 1 && (e.push(o[m]), r.push(m)) } return { newShape: e, keptDims: r } } function s(o, t) { const e = t.length; return o = o == null ? t.map((r, i) => i) : [].concat(o), (0, c.assert)(o.every(r => r >= -e && r < e), () => `All values in axis param must be in range [-${e}, ${e}) but got axis ${o}`), (0, c.assert)(o.every(h), () => `All values in axis param must be integers but got axis ${o}`), o.map(r => r < 0 ? e + r : r) } function h(o) { return o % 1 == 0 } function p(o) { if (o.length === 0) return 1; let t = o[0]; for (let e = 1; e < o.length; e++) t *= o[e]; return t } function l(o) { const t = Math.ceil(Math.sqrt(o)); return [t, Math.ceil(o / t)] } n.AlwaysKeepOriginalSizeStrategy = class { constructor(o) { this.maxTextureSize = o } computeTextureWH(o, t) { if (o.length === 0) return [1, 1]; const e = this.maxTextureSize; if (t && t.breakAxis !== void 0) { const d = t.breakAxis >= o.length ? 1 : o.slice(t.breakAxis).reduce((m, _) => m * _), g = t.breakAxis <= 0 ? 
1 : o.slice(0, t.breakAxis).reduce((m, _) => m * _); if (!(d > e || g > e)) return [d, g]; u.Logger.verbose("TextureLayout", `Given width/height preferences were unattainable: shape:${o}, breakAxis:${t.breakAxis}`) } const r = o.reduce((d, g) => d * g); let i = Math.floor(Math.sqrt(r)); for (; i < e && i < r && r % i != 0; i++); if (i >= e || r % i != 0) throw new Error(`The given dimensions are outside this GPU's boundaries: ${o}`); return [i, r / i] } }, n.PreferLogicalStrategy = class { constructor(o) { this.maxTextureSize = o } computeTextureWH(o, t) { const e = this.computeTexture(o, t); return t && t.isPacked && (e[0] /= 2, e[1] /= 2), t && t.reverseWH ? [e[1], e[0]] : e } computeTexture(o, t) { const e = t && t.isPacked; if (o.length === 0) return e ? [2, 2] : [1, 1]; let r = this.maxTextureSize; if (t && t.breakAxis !== void 0) { const g = t.breakAxis >= o.length ? 1 : o.slice(t.breakAxis).reduce((_, y) => _ * y), m = t.breakAxis <= 0 ? 1 : o.slice(0, t.breakAxis).reduce((_, y) => _ * y); if (!(g > r || m > r)) return [g, m]; u.Logger.verbose("TextureLayout", `Given width/height preferences were unattainable: shape:${o}, breakAxis:${t.breakAxis}`) } let i = o.slice(0); e && (r *= 2, i = i.map((g, m) => m >= i.length - 2 ? i[m] % 2 == 0 ? i[m] : i[m] + 1 : i[m]), i.length === 1 && (i = [2, i[0]])), i.length !== 2 && (i = f(i).newShape); const d = p(i); return i.length <= 1 && d <= r ? [1, d] : i.length === 2 && i[0] <= r && i[1] <= r ? i : i.length === 3 && i[0] * i[1] <= r && i[2] <= r ? [i[0] * i[1], i[2]] : i.length === 3 && i[0] <= r && i[1] * i[2] <= r ? [i[0], i[1] * i[2]] : i.length === 4 && i[0] * i[1] * i[2] <= r && i[3] <= r ? [i[0] * i[1] * i[2], i[3]] : i.length === 4 && i[0] <= r && i[1] * i[2] * i[3] <= r ? [i[0], i[1] * i[2] * i[3]] : e ? l(d / 4).map(g => 2 * g) : l(d) } }, n.squeezeShape = f, n.parseAxisParam = s, n.isInt = h, n.sizeFromShape = p, n.getRowsCols = function(o) { if (o.length === 0) throw Error("Cannot get rows and columns of an empty shape array."); return [o.length > 1 ? o[o.length - 2] : 1, o[o.length - 1]] }, n.sizeToSquarishShape = l, n.getBatchDim = function(o, t = 2) { return p(o.slice(0, o.length - t)) } }, 4057: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createTextureLayoutFromShape = n.calculateTextureWidthAndHeight = n.createTextureLayoutFromTextureType = void 0; const u = a(2517), c = a(2039); n.createTextureLayoutFromTextureType = (f, s, h) => { const p = h === c.TextureType.unpacked || h === c.TextureType.unpackedReversed ? 1 : 4, l = h === c.TextureType.packed, o = h === c.TextureType.unpackedReversed || h === c.TextureType.packed, t = h === c.TextureType.packedLastDimension ? s.length - 1 : void 0, e = h === c.TextureType.packedLastDimension ? s.map((r, i) => i === s.length - 1 ? 
4 * r : r) : void 0; return (0, n.createTextureLayoutFromShape)(f, s, p, e, { isPacked: l, reverseWH: o, breakAxis: t }) }, n.calculateTextureWidthAndHeight = (f, s, h) => { const p = (0, n.createTextureLayoutFromTextureType)(f, s, h); return [p.width, p.height] }, n.createTextureLayoutFromShape = (f, s, h = 1, p, l) => { const o = !(!l || !l.isPacked), [t, e] = f.computeTextureWH(o && p || s, l), r = s.length; let i = s.slice(0); if (r === 0 && (i = [1]), h === 1) p = s; else if (o) { if (h !== 4) throw new Error("a packed texture must be 4-channel"); p = s, r > 0 && (i[r - 1] = Math.ceil(i[r - 1] / 2)), r > 1 && (i[r - 2] = Math.ceil(i[r - 2] / 2)) } else if (!p) throw new Error("Unpacked shape is needed when using channels > 1"); return { width: t, height: e, channels: h, isPacked: o, shape: i, strides: u.ShapeUtil.computeStrides(i), unpackedShape: p, reversedWH: l && l.reverseWH } } }, 5702: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.TextureManager = void 0; const u = a(6231); n.TextureManager = class { constructor(c, f, s, h) { this.glContext = c, this.layoutStrategy = f, this.profiler = s, this.config = h, this.pendingRead = new Map, h.reuseTextures && (this.inUseTextures = new Map, this.idleTextures = new Map, this.textureLookup = new Map) } createTextureFromLayout(c, f, s, h) { const p = this.toEncoderType(c), l = this.glContext.getEncoder(p, f.channels || 1, h); if (f.isPacked && h === 1) throw new Error("not implemented"); const o = f.width, t = f.height; let e, r; if (this.config.reuseTextures) { e = `${o}x${t}_${l.format}_${l.internalFormat}_${l.textureType}`, r = this.inUseTextures.get(e), r || (r = [], this.inUseTextures.set(e, r)); const d = this.idleTextures.get(e); if (d && d.length > 0) { const g = d.pop(); return r.push(g), h === 1 && this.glContext.updateTexture(g, o, t, l, this.toTextureData(c, s)), g } } u.Logger.verbose("TextureManager", `Creating new texture of size ${f.width}x${f.height}`); const i = this.glContext.allocateTexture(o, t, l, this.toTextureData(c, s)); return this.config.reuseTextures && (r.push(i), this.textureLookup.set(i, e)), i } readTexture(c, f, s) { return s || (s = 1), this.profiler.event("backend", "TextureManager.readTexture", () => { const h = c.shape.reduce((l, o) => l * o) * s, p = this.glContext.readTexture(c.texture, c.width, c.height, h, this.toEncoderType(f), s); return this.toTensorData(f, p) }) } async readTextureAsync(c, f, s) { const h = c.tensor.dataId; if (s || (s = 1), this.pendingRead.has(h)) { const p = this.pendingRead.get(h); return new Promise(l => p?.push(l)) } return this.profiler.event("backend", "TextureManager.readTextureAsync", async () => { this.pendingRead.set(h, []); const p = c.shape.reduce((e, r) => e * r) * s; await this.glContext.createAndWaitForFence(); const l = this.glContext.readTexture(c.texture, c.width, c.height, p, this.toEncoderType(f), s), o = this.toTensorData(f, l), t = this.pendingRead.get(h); return this.pendingRead.delete(h), t?.forEach(e => e(o)), o }) } readUint8TextureAsFloat(c) { return this.profiler.event("backend", "TextureManager.readUint8TextureAsFloat", () => { const f = c.shape.reduce((h, p) => h * p), s = this.glContext.readTexture(c.texture, c.width, c.height, 4 * f, "byte", 4); return new Float32Array(s.buffer, s.byteOffset, f) }) } releaseTexture(c, f) { let s; if (this.config.reuseTextures && (s = this.textureLookup.get(c.texture), s)) { f && this.textureLookup.delete(s); const h = this.inUseTextures.get(s); if (h) { const p = h.indexOf(c.texture); if 
(p !== -1) { h.splice(p, 1); let l = this.idleTextures.get(s); l || (l = [], this.idleTextures.set(s, l)), l.push(c.texture) } } } s && !f || (u.Logger.verbose("TextureManager", `Deleting texture of size ${c.width}x${c.height}`), this.glContext.deleteTexture(c.texture)) } toTensorData(c, f) { switch (c) { case "int16": return f instanceof Int16Array ? f : Int16Array.from(f); case "int32": return f instanceof Int32Array ? f : Int32Array.from(f); case "int8": return f instanceof Int8Array ? f : Int8Array.from(f); case "uint16": return f instanceof Uint16Array ? f : Uint16Array.from(f); case "uint32": return f instanceof Uint32Array ? f : Uint32Array.from(f); case "uint8": case "bool": return f instanceof Uint8Array ? f : Uint8Array.from(f); case "float32": return f instanceof Float32Array ? f : Float32Array.from(f); case "float64": return f instanceof Float64Array ? f : Float64Array.from(f); default: throw new Error(`TensorData type ${c} is not supported`) } } toTextureData(c, f) { if (f) return f instanceof Float32Array ? f : new Float32Array(f) } toEncoderType(c) { return "float" } clearActiveTextures() { this.glContext.clearActiveTextures() } } }, 2039: (b, n) => { var a; Object.defineProperty(n, "__esModule", { value: !0 }), n.TextureType = void 0, (a = n.TextureType || (n.TextureType = {}))[a.unpacked = 0] = "unpacked", a[a.unpackedReversed = 1] = "unpackedReversed", a[a.packed = 2] = "packed", a[a.downloadUint8AsFloat = 3] = "downloadUint8AsFloat", a[a.packedLastDimension = 4] = "packedLastDimension" }, 9390: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.getGlChannels = n.getCoordsDataType = n.getSqueezedParams = n.squeezeInputShape = n.generateShaderFuncNameFromInputSamplerNameAtOutCoords = n.generateShaderFuncNameFromInputSamplerName = n.repeatedTry = n.getPackedShape = void 0; const u = a(2517); n.getPackedShape = function(c) { const f = c.length; return c.slice(0, f - 1).concat(c[f - 1] / 4) }, n.repeatedTry = async function(c, f = h => 0, s) { return new Promise((h, p) => { let l = 0; const o = () => { if (c()) return void h(); l++; const t = f(l); s != null && l >= s ? 
p() : setTimeout(o, t) }; o() }) }, n.generateShaderFuncNameFromInputSamplerName = function(c) { return (0, u.assert)(c !== void 0 && c.length !== 0, () => "empty string found for sampler name"), "get" + c.charAt(0).toUpperCase() + c.slice(1) }, n.generateShaderFuncNameFromInputSamplerNameAtOutCoords = function(c) { return (0, u.assert)(c !== void 0 && c.length !== 0, () => "empty string found for sampler name"), "get" + c.charAt(0).toUpperCase() + c.slice(1) + "AtOutCoords" }, n.squeezeInputShape = function(c, f) { let s = JSON.parse(JSON.stringify(c)); return s = f, s }, n.getSqueezedParams = function(c, f) { return f.map(s => c[s]).join(", ") }, n.getCoordsDataType = function(c) { if (c <= 1) return "int"; if (c === 2) return "ivec2"; if (c === 3) return "ivec3"; if (c === 4) return "ivec4"; if (c === 5) return "ivec5"; if (c === 6) return "ivec6"; throw Error(`GPU for rank ${c} is not yet supported`) }, n.getGlChannels = function(c = 6) { return ["x", "y", "z", "w", "u", "v"].slice(0, c) } }, 7305: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.createNewWebGLContext = n.createWebGLContext = void 0; const u = a(6231), c = a(1713), f = {}; function s(h) { const p = function() { if (typeof document > "u") { if (typeof OffscreenCanvas > "u") throw new TypeError("failed to create canvas: OffscreenCanvas is not supported"); return new OffscreenCanvas(1, 1) } const t = document.createElement("canvas"); return t.width = 1, t.height = 1, t }(); let l; const o = { alpha: !1, depth: !1, antialias: !1, stencil: !1, preserveDrawingBuffer: !1, premultipliedAlpha: !1, failIfMajorPerformanceCaveat: !1 }; if ((!h || h === "webgl2") && (l = p.getContext("webgl2", o), l)) try { return new c.WebGLContext(l, 2) } catch (t) { u.Logger.warning("GlContextFactory", `failed to create WebGLContext using contextId 'webgl2'. Error: ${t}`) } if ((!h || h === "webgl") && (l = p.getContext("webgl", o) || p.getContext("experimental-webgl", o), l)) try { return new c.WebGLContext(l, 1) } catch (t) { u.Logger.warning("GlContextFactory", `failed to create WebGLContext using contextId 'webgl' or 'experimental-webgl'. Error: ${t}`) } throw new Error("WebGL is not supported") } n.createWebGLContext = function h(p) { let l; p && p !== "webgl2" || !("webgl2" in f) ? p && p !== "webgl" || !("webgl" in f) || (l = f.webgl) : l = f.webgl2, l = l || s(p), p = p || l.version === 1 ? "webgl" : "webgl2"; const o = l.gl; return f[p] = l, o.isContextLost() ? (delete f[p], h(p)) : (o.disable(o.DEPTH_TEST), o.disable(o.STENCIL_TEST), o.disable(o.BLEND), o.disable(o.DITHER), o.disable(o.POLYGON_OFFSET_FILL), o.disable(o.SAMPLE_COVERAGE), o.enable(o.SCISSOR_TEST), o.enable(o.CULL_FACE), o.cullFace(o.BACK), l) }, n.createNewWebGLContext = s }, 1713: function(b, n, a) { var u = this && this.__createBinding || (Object.create ? function(o, t, e, r) { r === void 0 && (r = e); var i = Object.getOwnPropertyDescriptor(t, e); i && !("get" in i ? !t.__esModule : i.writable || i.configurable) || (i = { enumerable: !0, get: function() { return t[e] } }), Object.defineProperty(o, r, i) } : function(o, t, e, r) { r === void 0 && (r = e), o[r] = t[e] }), c = this && this.__setModuleDefault || (Object.create ? 
function(o, t) { Object.defineProperty(o, "default", { enumerable: !0, value: t }) } : function(o, t) { o.default = t }), f = this && this.__importStar || function(o) { if (o && o.__esModule) return o; var t = {}; if (o != null) for (var e in o) e !== "default" && Object.prototype.hasOwnProperty.call(o, e) && u(t, o, e); return c(t, o), t }; Object.defineProperty(n, "__esModule", { value: !0 }), n.WebGLContext = n.linearSearchLastTrue = void 0; const s = a(1670), h = f(a(7769)), p = a(9390); function l(o) { let t = 0; for (; t < o.length && o[t](); ++t); return t - 1 } n.linearSearchLastTrue = l, n.WebGLContext = class { constructor(o, t) { this.frameBufferBound = !1, this.itemsToPoll = [], this.gl = o, this.version = t, this.getExtensions(), this.vertexbuffer = this.createVertexbuffer(), this.framebuffer = this.createFramebuffer(), this.queryVitalParameters() } allocateTexture(o, t, e, r) { const i = this.gl, d = i.createTexture(); i.bindTexture(i.TEXTURE_2D, d), i.texParameteri(i.TEXTURE_2D, i.TEXTURE_MIN_FILTER, i.NEAREST), i.texParameteri(i.TEXTURE_2D, i.TEXTURE_MAG_FILTER, i.NEAREST), i.texParameteri(i.TEXTURE_2D, i.TEXTURE_WRAP_S, i.CLAMP_TO_EDGE), i.texParameteri(i.TEXTURE_2D, i.TEXTURE_WRAP_T, i.CLAMP_TO_EDGE); const g = r ? e.encode(r, o * t) : null; return i.texImage2D(i.TEXTURE_2D, 0, e.internalFormat, o, t, 0, e.format, e.textureType, g), this.checkError(), d } updateTexture(o, t, e, r, i) { const d = this.gl; d.bindTexture(d.TEXTURE_2D, o); const g = r.encode(i, t * e); d.texSubImage2D(d.TEXTURE_2D, 0, 0, 0, t, e, r.format, r.textureType, g), this.checkError() } attachFramebuffer(o, t, e) { const r = this.gl; r.bindTexture(r.TEXTURE_2D, o), r.bindFramebuffer(r.FRAMEBUFFER, this.framebuffer), r.framebufferTexture2D(r.FRAMEBUFFER, r.COLOR_ATTACHMENT0, r.TEXTURE_2D, o, 0), this.checkError(), r.viewport(0, 0, t, e), r.scissor(0, 0, t, e) } readTexture(o, t, e, r, i, d) { const g = this.gl; d || (d = 1), this.frameBufferBound || this.attachFramebuffer(o, t, e); const m = this.getEncoder(i, d), _ = m.allocate(t * e); return g.bindTexture(g.TEXTURE_2D, o), g.framebufferTexture2D(g.FRAMEBUFFER, g.COLOR_ATTACHMENT0, g.TEXTURE_2D, o, 0), g.readPixels(0, 0, t, e, g.RGBA, m.textureType, _), this.checkError(), m.decode(_, r) } isFramebufferReady() { return !0 } getActiveTexture() { const o = this.gl; return "TEXTURE" + (o.getParameter(this.gl.ACTIVE_TEXTURE) - o.TEXTURE0) } getTextureBinding() { return this.gl.getParameter(this.gl.TEXTURE_BINDING_2D) } getFramebufferBinding() { return this.gl.getParameter(this.gl.FRAMEBUFFER_BINDING) } setVertexAttributes(o, t) { const e = this.gl; e.vertexAttribPointer(o, 3, e.FLOAT, !1, 20, 0), e.enableVertexAttribArray(o), t !== -1 && (e.vertexAttribPointer(t, 2, e.FLOAT, !1, 20, 12), e.enableVertexAttribArray(t)), this.checkError() } createProgram(o, t) { const e = this.gl, r = e.createProgram(); return e.attachShader(r, o), e.attachShader(r, t), e.linkProgram(r), r } compileShader(o, t) { const e = this.gl, r = e.createShader(t); if (!r) throw new Error(`createShader() returned null with type ${t}`); if (e.shaderSource(r, o), e.compileShader(r), e.getShaderParameter(r, e.COMPILE_STATUS) === !1) throw new Error(`Failed to compile shader: ${e.getShaderInfoLog(r)} Shader source: ${o}`); return r } deleteShader(o) { this.gl.deleteShader(o) } bindTextureToUniform(o, t, e) { const r = this.gl; r.activeTexture(r.TEXTURE0 + t), this.checkError(), r.bindTexture(r.TEXTURE_2D, o), this.checkError(), r.uniform1i(e, t), this.checkError() } draw() { 
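// Render the full-screen quad (4-vertex triangle strip); the currently bound fragment shader computes one output texel per fragment.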
this.gl.drawArrays(this.gl.TRIANGLE_STRIP, 0, 4), this.checkError() } checkError() { if (s.env.debug) { const o = this.gl, t = o.getError(); let e = ""; switch (t) { case o.NO_ERROR: return; case o.INVALID_ENUM: e = "INVALID_ENUM"; break; case o.INVALID_VALUE: e = "INVALID_VALUE"; break; case o.INVALID_OPERATION: e = "INVALID_OPERATION"; break; case o.INVALID_FRAMEBUFFER_OPERATION: e = "INVALID_FRAMEBUFFER_OPERATION"; break; case o.OUT_OF_MEMORY: e = "OUT_OF_MEMORY"; break; case o.CONTEXT_LOST_WEBGL: e = "CONTEXT_LOST_WEBGL"; break; default: e = `Unknown WebGL Error: ${t.toString(16)}` } throw new Error(e) } } deleteTexture(o) { this.gl.deleteTexture(o) } deleteProgram(o) { this.gl.deleteProgram(o) } getEncoder(o, t, e = 0) { if (this.version === 2) return new h.RedFloat32DataEncoder(this.gl, t); switch (o) { case "float": return e === 1 || this.isRenderFloat32Supported ? new h.RGBAFloatDataEncoder(this.gl, t) : new h.RGBAFloatDataEncoder(this.gl, t, this.textureHalfFloatExtension.HALF_FLOAT_OES); case "int": throw new Error("not implemented"); case "byte": return new h.Uint8DataEncoder(this.gl, t); default: throw new Error(`Invalid dataType: ${o}`) } } clearActiveTextures() { const o = this.gl; for (let t = 0; t < this.maxTextureImageUnits; ++t) o.activeTexture(o.TEXTURE0 + t), o.bindTexture(o.TEXTURE_2D, null) } dispose() { if (this.disposed) return; const o = this.gl; o.bindFramebuffer(o.FRAMEBUFFER, null), o.deleteFramebuffer(this.framebuffer), o.bindBuffer(o.ARRAY_BUFFER, null), o.deleteBuffer(this.vertexbuffer), o.bindBuffer(o.ELEMENT_ARRAY_BUFFER, null), o.finish(), this.disposed = !0 } createDefaultGeometry() { return new Float32Array([-1, 1, 0, 0, 1, -1, -1, 0, 0, 0, 1, 1, 0, 1, 1, 1, -1, 0, 1, 0]) } createVertexbuffer() { const o = this.gl, t = o.createBuffer(); if (!t) throw new Error("createBuffer() returned null"); const e = this.createDefaultGeometry(); return o.bindBuffer(o.ARRAY_BUFFER, t), o.bufferData(o.ARRAY_BUFFER, e, o.STATIC_DRAW), this.checkError(), t } createFramebuffer() { const o = this.gl.createFramebuffer(); if (!o) throw new Error("createFramebuffer returned null"); return o } queryVitalParameters() { const o = this.gl; if (this.isFloatTextureAttachableToFrameBuffer = this.checkFloatTextureAttachableToFrameBuffer(), this.isRenderFloat32Supported = this.checkRenderFloat32(), this.isFloat32DownloadSupported = this.checkFloat32Download(), this.version === 1 && !this.textureHalfFloatExtension && !this.isRenderFloat32Supported) throw new Error("both float32 and float16 TextureType are not supported"); this.isBlendSupported = !this.isRenderFloat32Supported || this.checkFloat32Blend(), this.maxTextureSize = o.getParameter(o.MAX_TEXTURE_SIZE), this.maxTextureImageUnits = o.getParameter(o.MAX_TEXTURE_IMAGE_UNITS), this.version } getExtensions() { this.version === 2 ? (this.colorBufferFloatExtension = this.gl.getExtension("EXT_color_buffer_float"), this.disjointTimerQueryWebgl2Extension = this.gl.getExtension("EXT_disjoint_timer_query_webgl2")) : (this.textureFloatExtension = this.gl.getExtension("OES_texture_float"), this.textureHalfFloatExtension = this.gl.getExtension("OES_texture_half_float")) } checkFloatTextureAttachableToFrameBuffer() { const o = this.gl, t = o.createTexture(); o.bindTexture(o.TEXTURE_2D, t); const e = this.version === 2 ? 
o.RGBA32F : o.RGBA; o.texImage2D(o.TEXTURE_2D, 0, e, 1, 1, 0, o.RGBA, o.FLOAT, null); const r = o.createFramebuffer(); o.bindFramebuffer(o.FRAMEBUFFER, r), o.framebufferTexture2D(o.FRAMEBUFFER, o.COLOR_ATTACHMENT0, o.TEXTURE_2D, t, 0); const i = o.checkFramebufferStatus(o.FRAMEBUFFER) === o.FRAMEBUFFER_COMPLETE; return o.bindTexture(o.TEXTURE_2D, null), o.bindFramebuffer(o.FRAMEBUFFER, null), o.deleteTexture(t), o.deleteFramebuffer(r), i } checkRenderFloat32() { if (this.version === 2) { if (!this.colorBufferFloatExtension) return !1 } else if (!this.textureFloatExtension) return !1; return this.isFloatTextureAttachableToFrameBuffer } checkFloat32Download() { if (this.version === 2) { if (!this.colorBufferFloatExtension) return !1 } else if (!this.textureFloatExtension || !this.gl.getExtension("WEBGL_color_buffer_float")) return !1; return this.isFloatTextureAttachableToFrameBuffer } checkFloat32Blend() { const o = this.gl; let t, e, r, i, d; try { t = o.createTexture(), e = o.createFramebuffer(), o.bindTexture(o.TEXTURE_2D, t); const g = this.version === 2 ? o.RGBA32F : o.RGBA; return o.texImage2D(o.TEXTURE_2D, 0, g, 1, 1, 0, o.RGBA, o.FLOAT, null), o.bindFramebuffer(o.FRAMEBUFFER, e), o.framebufferTexture2D(o.FRAMEBUFFER, o.COLOR_ATTACHMENT0, o.TEXTURE_2D, t, 0), o.enable(o.BLEND), r = o.createShader(o.VERTEX_SHADER), !!r && (o.shaderSource(r, "void main(){}"), o.compileShader(r), i = o.createShader(o.FRAGMENT_SHADER), !!i && (o.shaderSource(i, "precision highp float;void main(){gl_FragColor=vec4(0.5);}"), o.compileShader(i), d = o.createProgram(), !!d && (o.attachShader(d, r), o.attachShader(d, i), o.linkProgram(d), o.useProgram(d), o.drawArrays(o.POINTS, 0, 1), o.getError() === o.NO_ERROR))) } finally { o.disable(o.BLEND), d && o.deleteProgram(d), r && o.deleteShader(r), i && o.deleteShader(i), e && (o.bindFramebuffer(o.FRAMEBUFFER, null), o.deleteFramebuffer(e)), t && (o.bindTexture(o.TEXTURE_2D, null), o.deleteTexture(t)) } } beginTimer() { if (this.version === 2 && this.disjointTimerQueryWebgl2Extension) { const o = this.gl, t = this.disjointTimerQueryWebgl2Extension, e = o.createQuery(); return o.beginQuery(t.TIME_ELAPSED_EXT, e), e } throw new Error("WebGL1 profiling currently not supported.") } endTimer() { if (this.version !== 2 || !this.disjointTimerQueryWebgl2Extension) throw new Error("WebGL1 profiling currently not supported"); { const o = this.gl, t = this.disjointTimerQueryWebgl2Extension; o.endQuery(t.TIME_ELAPSED_EXT) } } isTimerResultAvailable(o) { let t = !1, e = !1; if (this.version !== 2 || !this.disjointTimerQueryWebgl2Extension) throw new Error("WebGL1 profiling currently not supported"); { const r = this.gl, i = this.disjointTimerQueryWebgl2Extension; t = r.getQueryParameter(o, r.QUERY_RESULT_AVAILABLE), e = r.getParameter(i.GPU_DISJOINT_EXT) } return t && !e } getTimerResult(o) { let t = 0; if (this.version !== 2) throw new Error("WebGL1 profiling currently not supported"); { const e = this.gl; t = e.getQueryParameter(o, e.QUERY_RESULT), e.deleteQuery(o) } return t / 1e6 } async waitForQueryAndGetTime(o) { return await (0, p.repeatedTry)(() => this.isTimerResultAvailable(o)), this.getTimerResult(o) } async createAndWaitForFence() { const o = this.createFence(this.gl); return this.pollFence(o) } createFence(o) { let t; const e = o, r = e.fenceSync(e.SYNC_GPU_COMMANDS_COMPLETE, 0); return o.flush(), t = r === null ? 
() => !0 : () => { const i = e.clientWaitSync(r, 0, 0); return i === e.ALREADY_SIGNALED || i === e.CONDITION_SATISFIED }, { query: r, isFencePassed: t } } async pollFence(o) { return new Promise(t => { this.addItemToPoll(() => o.isFencePassed(), () => t()) }) } pollItems() { const o = l(this.itemsToPoll.map(t => t.isDoneFn)); for (let t = 0; t <= o; ++t) { const { resolveFn: e } = this.itemsToPoll[t]; e() } this.itemsToPoll = this.itemsToPoll.slice(o + 1) } async addItemToPoll(o, t) { this.itemsToPoll.push({ isDoneFn: o, resolveFn: t }), this.itemsToPoll.length > 1 || await (0, p.repeatedTry)(() => (this.pollItems(), this.itemsToPoll.length === 0)) } } }, 1036: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.ExecutionPlan = void 0; const u = a(6231); class c { constructor(s, h) { this.op = s, this.node = h } } n.ExecutionPlan = class { constructor(f, s, h) { this.graph = f, this.profiler = h, this.initialize(s) } initialize(f) { this.profiler.event("session", "ExecutionPlan.initialize", () => { const s = this.graph.getNodes(); if (s.length !== f.length) throw new Error("The size of nodes and OPs do not match."); this._ops = f.map((h, p) => new c(h, s[p])), this.reset(), this._starter = [], this._ops.forEach((h, p) => { let l = !0; for (const o of h.node.inputs) if (!this._values[o] && this.graph.getInputIndices().indexOf(o) === -1) { l = !1; break } l && this._starter.push(p) }) }) } reset() { this._values = this.graph.getValues().map(f => f.tensor) } async execute(f, s) { return this.profiler.event("session", "ExecutionPlan.execute", async () => { this.reset(); const h = f.createInferenceHandler(), p = this.graph.getInputIndices(); if (s.length !== p.length) throw new Error(`number of input tensors don't match the number of inputs to the model: actual: ${s.length} expected: ${p.length}`); s.forEach((i, d) => { const g = p[d]; this._values[g] = i }); const l = this._starter.slice(0), o = this.graph.getValues(), t = this.graph.getNodes(); let e = 0; for (; e < l.length;) { const i = l[e++], d = this._ops[i], g = d.node.inputs.map(T => this._values[T]); if (g.indexOf(void 0) !== -1) throw new Error(`unresolved input detected: op: ${d.node}`); const m = g; u.Logger.verbose("ExecPlan", `Runing op:${d.node.name} (${m.map((T,w)=>`'${d.node.inputs[w]}': ${T.type}[${T.dims.join(",")}]`).join(", ")})`); const _ = await this.profiler.event("node", d.node.name, async () => d.op.impl(h, m, d.op.context)); if (_.length !== d.node.outputs.length) throw new Error("the size of output does not match model definition."); _.forEach((T, w) => { const S = d.node.outputs[w]; if (this._values[S]) throw new Error(`output [${S}] already has value: op:${d.node.name}`); this._values[S] = T }); const y = new Set; _.forEach((T, w) => { const S = d.node.outputs[w]; for (const O of o[S].to) { const E = t[O]; let v = !0; for (const P of E.inputs) if (!this._values[P]) { v = !1; break } v && y.add(O) } }), l.push(...y) } const r = []; for (let i = 0; i < this.graph.getOutputIndices().length; i++) { const d = this.graph.getOutputIndices()[i], g = this._values[d]; if (g === void 0) throw new Error(`required output [${d}] does not have value`); d === 0 ? 
await g.getData() : g.data, r.push(g) } return u.Logger.verbose("ExecPlan", "disposing of inferenceHandler"), h.dispose(), r }) } } }, 7070: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.Graph = void 0; const u = a(1446), c = a(7778), f = a(9395), s = a(9162), h = a(2517); var p = f.onnxruntime.experimental.fbs; n.Graph = { from: (e, r) => new t(e, r) }; class l { constructor(r) { this._from = void 0, this._to = [], this.tensor = void 0, this.type = void 0, r && (this.type = h.ProtoUtil.tensorValueTypeFromProto(r.type.tensorType)) } get from() { return this._from } get to() { return this._to } } class o { constructor(r, i) { r instanceof u.onnx.NodeProto ? (this.name = r.name, this.opType = r.opType, this.attributes = new c.Attribute(r.attribute)) : r instanceof p.Node && (this.name = i ?? r.name(), this.opType = r.opType(), this.attributes = new c.Attribute(h.ProtoUtil.tensorAttributesFromORTFormat(r))), this.inputs = [], this.outputs = [], this.executeNode = !0 } } class t { constructor(r, i) { if (!r) throw new TypeError("graph is empty"); this.buildGraph(r), this.transformGraph(i), this.checkIsAcyclic() } getInputIndices() { return this._allInputIndices } getInputNames() { return this._allInputNames } getOutputIndices() { return this._allOutputIndices } getOutputNames() { return this._allOutputNames } getValues() { return this._allData } getNodes() { return this._nodes } buildGraph(r) { if (r instanceof u.onnx.GraphProto) this.buildGraphFromOnnxFormat(r); else { if (!(r instanceof p.Graph)) throw new TypeError("Graph type is not supported."); this.buildGraphFromOrtFormat(r) } } buildGraphFromOnnxFormat(r) { const i = new Map; this._allData = [], this._allInputIndices = [], this._allInputNames = [], this._allOutputIndices = [], this._allOutputNames = [], this._nodes = []; const d = new Map; if (!r.input) throw new Error("missing information in graph: input"); const g = []; for (const m of r.input) { if (i.has(m.name)) throw new Error(`duplicated input name: ${m.name}`); const _ = this._allData.push(new l(m)) - 1; i.set(m.name, _), g.push(m.name) } if (!r.initializer) throw new Error("missing information in graph: initializer"); for (const m of r.initializer) { let _ = i.get(m.name); if (_ === void 0) { const y = new l; y.type = { shape: { dims: h.ProtoUtil.tensorDimsFromProto(m.dims) }, tensorType: h.ProtoUtil.tensorDataTypeFromProto(m.dataType) }, _ = this._allData.push(y) - 1, i.set(m.name, _) } this._allData[_]._from = -1, this._allData[_].tensor = s.Tensor.fromProto(m) } for (let m = 0; m < this._allData.length; m++) this._allData[m].tensor || (this._allInputIndices.push(m), this._allInputNames.push(g[m])); if (!r.output) throw new Error("missing information in graph: output"); for (const m of r.output) { if (i.has(m.name)) throw new Error(`duplicated output name: ${m.name}`); const _ = this._allData.push(new l(m)) - 1; i.set(m.name, _), this._allOutputIndices.push(_), this._allOutputNames.push(m.name) } if (!r.node) throw new Error("missing information in graph: node"); for (const m of r.node) { if (!m.name) for (let y = 0;; y++) { const T = `unnamed_${m.opType}_${y}`; if (!d.has(T)) { m.name = T; break } } if (d.has(m.name)) throw new Error(`duplicated node name: ${m.name}`); const _ = this._nodes.push(new o(m)) - 1; d.set(m.name, _) } for (let m = 0; m < this._nodes.length; m++) { const _ = this._nodes[m], y = r.node[m]; if (!y.output) throw new Error(`missing output for node: ${y.name}`); for (const T of y.output) { let w = i.get(T); if (w === void 0 && 
(w = this._allData.push(new l) - 1, i.set(T, w)), _.outputs.push(w), this._allData[w]._from !== void 0) throw new Error(`multiple nodes output to one data value: ${w}`); if (this._allData[w]._from = m, y.opType === "Constant") { if (!y.attribute || y.attribute.length !== 1 || !y.attribute[0].t) throw new Error("missing attributes or missing tensor value in attributes for this Constant operator"); if (!y.output || y.output.length !== 1) throw new Error("missing output or incorrect number of outputs for this Constant operator"); _.outputs.pop(), _.executeNode = !1, this._allData[w]._from = -1, this._allData[w].tensor = s.Tensor.fromProto(y.attribute[0].t) } } } for (let m = 0; m < this._nodes.length; m++) { const _ = this._nodes[m], y = r.node[m]; if (!y.input) throw new Error(`missing input for node: ${y.name}`); for (const T of y.input) { const w = i.get(T); if (w === void 0) { if (T === "" && y.input.length === 3 && y.opType === "Resize") continue; throw new Error(`unrecognized input '${T}' for node: ${y.name}`) } _.inputs.push(w), this._allData[w]._to.push(m) } } return !0 } buildGraphFromOrtFormat(r) { var i, d, g; const m = new Map; this._allData = [], this._allInputIndices = [], this._allInputNames = [], this._allOutputIndices = [], this._allOutputNames = [], this._nodes = []; const _ = new Map, y = []; for (let T = 0; T < r.inputsLength(); T++) { const w = r.inputs(T); if (m.has(w)) throw new Error(`duplicated input name: ${w}`); for (let S = 0; S < r.nodeArgsLength(); S++) if (((i = r.nodeArgs(S)) === null || i === void 0 ? void 0 : i.name()) === w) { const O = new l; if (((g = (d = r.nodeArgs(S)) === null || d === void 0 ? void 0 : d.type()) === null || g === void 0 ? void 0 : g.valueType()) !== p.TypeInfoValue.tensor_type) throw new Error("Unexpected value type for the nodeArg."); const E = r.nodeArgs(S).type().value(new p.TensorTypeAndShape), v = h.ProtoUtil.tensorDataTypeFromProto(E.elemType()), P = E.shape(), L = []; for (let R = 0; R < P.dimLength(); R++) L.push(h.LongUtil.longToNumber(P.dim(R).value().dimValue())); O.type = { shape: { dims: L }, tensorType: v }; const V = this._allData.push(O) - 1; m.set(w, V), y.push(w) } } for (let T = 0; T < r.initializersLength(); T++) { const w = r.initializers(T); let S = m.get(w.name()); if (S === void 0) { const O = new l, E = h.ProtoUtil.tensorDimsFromORTFormat(w), v = h.ProtoUtil.tensorDataTypeFromProto(w.dataType()); O.type = { shape: { dims: E }, tensorType: v }, S = this._allData.push(O) - 1, m.set(w.name(), S) } this._allData[S]._from = -1, this._allData[S].tensor = s.Tensor.fromOrtTensor(w) } for (let T = 0; T < this._allData.length; T++) this._allData[T].tensor || (this._allInputIndices.push(T), this._allInputNames.push(y[T])); for (let T = 0; T < r.outputsLength(); T++) { const w = r.outputs(T); if (m.has(w)) throw new Error(`duplicated output name: ${w}`); const S = this._allData.push(new l) - 1; m.set(w, S), this._allOutputIndices.push(S), this._allOutputNames.push(w) } if (!r.nodes) throw new Error("missing information in graph: node"); for (let T = 0; T < r.nodesLength(); T++) { const w = r.nodes(T); let S = w.name(); if (!S) for (let E = 0; S = `unnamed_${w.opType()}_${E}`, _.has(S); E++); if (_.has(S)) throw new Error(`duplicated node name: ${S}`); const O = this._nodes.push(new o(w, S)) - 1; _.set(S, O) } for (let T = 0; T < this._nodes.length; T++) { const w = this._nodes[T], S = r.nodes(T); if (S == null) throw new Error(`No node exists at index ${T}`); if (S?.outputsLength() === 0) throw new Error(`missing output 
for node: ${S.name}`); for (let O = 0; O < S?.outputsLength(); O++) { const E = S?.outputs(O); let v = m.get(E); if (v === void 0 && (v = this._allData.push(new l) - 1, m.set(E, v)), w.outputs.push(v), this._allData[v]._from !== void 0) throw new Error(`multiple nodes output to one data value: ${v}`); if (this._allData[v]._from = T, S.opType() === "Constant") { if (S.attributesLength() !== 1 || !S.attributes(0).t()) throw new Error("missing attributes or missing tensor value in attributes for this Constant operator"); if (S.outputsLength() !== 1) throw new Error("missing output or incorrect number of outputs for this Constant operator"); w.outputs.pop(), w.executeNode = !1, this._allData[v]._from = -1, this._allData[v].tensor = s.Tensor.fromOrtTensor(S.attributes(0).t()) } } } for (let T = 0; T < this._nodes.length; T++) { const w = this._nodes[T], S = r.nodes(T); if (S.inputsLength() === 0) throw new Error(`missing input for node: ${S.name}`); for (let O = 0; O < S.inputsLength(); O++) { const E = S.inputs(O), v = m.get(E); if (v === void 0) throw new Error(`unrecognized input '${E}' for node: ${S.name()}`); w.inputs.push(v), this._allData[v]._to.push(T) } } } checkIsAcyclic() { const r = new Set; this._allInputIndices.forEach(g => { this._allData[g]._to.forEach(m => { r.add(m) }) }); const i = Array.from(r), d = new Array(this._nodes.length).fill("white"); for (; i.length > 0;) { const g = i.pop(); d[g] === "gray" ? d[g] = "black" : (i.push(g), d[g] = "gray", this._nodes[g].outputs.forEach(m => { const _ = this._allData[m]; if (_.tensor !== void 0) throw new Error("node outputs should not be initialized"); if (_._from !== g) throw new Error("from property of the Value object doesn't match index of Node being processed"); _._to.forEach(y => { if (d[y] === "gray") throw new Error("model graph is cyclic"); d[y] === "white" && i.push(y) }) })) } } transformGraph(r) { this.removeAllIdentityNodes(), this.removeAllDropoutNodes(), this.fuseConvActivationNodes(), r && r.transformGraph(this), this.finalizeGraph() } finalizeGraph() { let r = 0; for (let i = 0; i < this._nodes.length; i++) this._nodes[i].executeNode ? r > 0 && (this._nodes[i].inputs.forEach(d => { const g = this._allData[d]._to.indexOf(i + r); g !== -1 && (this._allData[d]._to[g] = i) }), this._nodes[i].outputs.forEach(d => { this._allData[d]._from && this._allData[d]._from === i + r && (this._allData[d]._from = i) })) : (r++, this._nodes[i].outputs.forEach(d => { this._allData[d]._from = -2 }), this._nodes.splice(i, 1), i--); r = 0; for (let i = 0; i < this._allData.length; i++) if (this._allData[i].from !== -2 || this._allOutputIndices.indexOf(i + r) !== -1) { if (r > 0) { let d = -1; this._allData[i].from !== void 0 && this._allData[i].from !== -1 ? (d = this._nodes[this._allData[i].from].outputs.indexOf(i + r), d !== -1 && (this._nodes[this._allData[i].from].outputs[d] = i)) : (d = this._allInputIndices.indexOf(i + r), d !== -1 && (this._allInputIndices[d] = i)), this._allData[i].to.forEach(g => { d = this._nodes[g].inputs.indexOf(i + r), d !== -1 && (this._nodes[g].inputs[d] = i) }), this._allData[i].to.length === 0 && (d = this._allOutputIndices.indexOf(i + r), d !== -1 && (this._allOutputIndices[d] = i)) } } else r++, this._allData.splice(i, 1), i-- } deleteNode(r) { const i = this._nodes[r]; if (i.outputs.length > 1) { for (let T = 1; T < i.outputs.length; T++) if (this._allData[i.outputs[T]].to.length > 0) throw new Error("Node deletion with more than one output connected to other nodes is not supported. 
") } i.executeNode = !1; const d = i.inputs[0], g = i.outputs[0], m = this._allData[g].to, _ = this._allData[d].to.indexOf(r); if (_ === -1) throw new Error("The Value object doesn't have the current Node in it's 'to' property "); this._allData[d].to.splice(_, 1), this._allData[g]._to = []; const y = this._allOutputIndices.indexOf(g); if (y !== -1 && (this._allOutputIndices[y] = d), m && m.length > 0) for (const T of m) { const w = this._nodes[T].inputs.indexOf(g); if (w === -1) throw new Error("The Node object doesn't have the output Value in it's 'inputs' property "); this._nodes[T].inputs[w] = d, this._allData[d].to.push(T) } } removeAllDropoutNodes() { let r = 0; for (const i of this._nodes) { if (i.opType === "Dropout") { if (i.inputs.length !== 1) throw new Error("Dropout nodes should only contain one input. "); if (i.outputs.length !== 1 && i.outputs.length !== 2) throw new Error("Dropout nodes should contain either 1 or 2 output(s)"); if (i.outputs.length === 2 && this._allData[i.outputs[1]]._to.length !== 0) throw new Error("Dropout nodes's second output should not be referenced by other nodes"); this.deleteNode(r) } r++ } } removeAllIdentityNodes() { let r = 0; for (const i of this._nodes) i.opType === "Identity" && this.deleteNode(r), r++ } isActivation(r) { switch (r.opType) { case "Relu": case "Sigmoid": case "Clip": return !0; default: return !1 } } fuseConvActivationNodes() { for (const r of this._nodes) if (r.opType === "Conv") { const i = this._allData[r.outputs[0]]._to; if (i.length === 1 && this.isActivation(this._nodes[i[0]])) { const d = this._nodes[i[0]]; if (d.opType === "Clip") if (d.inputs.length === 1) try { r.attributes.set("activation_params", "floats", [d.attributes.getFloat("min"), d.attributes.getFloat("max")]) } catch { r.attributes.set("activation_params", "floats", [h.MIN_CLIP, h.MAX_CLIP]) } else { if (!(d.inputs.length >= 3 && this._allData[d.inputs[1]].tensor !== void 0 && this._allData[d.inputs[2]].tensor !== void 0)) continue; r.attributes.set("activation_params", "floats", [this._allData[d.inputs[1]].tensor.floatData[0], this._allData[d.inputs[2]].tensor.floatData[0]]) } r.attributes.set("activation", "string", d.opType), this.deleteNode(i[0]) } } } } }, 6231: (b, n) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.now = n.Profiler = n.Logger = void 0; const a = { verbose: 1e3, info: 2e3, warning: 4e3, error: 5e3, fatal: 6e3 }, u = { none: new class { log(o, t, e) {} }, console: new class { log(o, t, e) { console.log(`${this.color(o)} ${e?"\x1B[35m"+e+"\x1B[0m ":""}${t}`) } color(o) { switch (o) { case "verbose": return "\x1B[34;40mv\x1B[0m"; case "info": return "\x1B[32mi\x1B[0m"; case "warning": return "\x1B[30;43mw\x1B[0m"; case "error": return "\x1B[31;40me\x1B[0m"; case "fatal": return "\x1B[101mf\x1B[0m"; default: throw new Error(`unsupported severity: ${o}`) } } } }, c = { provider: "console", minimalSeverity: "warning", logDateTime: !0, logSourceLocation: !1 }; let f = { "": c }; function s(o, t, e, r) { if (t === void 0) return i = o, { verbose: s.verbose.bind(null, i), info: s.info.bind(null, i), warning: s.warning.bind(null, i), error: s.error.bind(null, i), fatal: s.fatal.bind(null, i) }; if (e === void 0) h(o, t); else if (typeof e == "number" && r === void 0) h(o, t); else if (typeof e == "string" && r === void 0) h(o, e, 0, t); else { if (typeof e != "string" || typeof r != "number") throw new TypeError("input is valid"); h(o, e, 0, t) } var i } function h(o, t, e, r) { const i = f[r || ""] || f[""]; a[o] < 
a[i.minimalSeverity] || (i.logDateTime && (t = `${new Date().toISOString()}|${t}`), i.logSourceLocation, u[i.provider].log(o, t, r)) }(function(o) { function t(r) { f = {}, e("", r || {}) } function e(r, i) { if (r === "*") t(i); else { const d = f[r] || c; f[r] = { provider: i.provider || d.provider, minimalSeverity: i.minimalSeverity || d.minimalSeverity, logDateTime: i.logDateTime === void 0 ? d.logDateTime : i.logDateTime, logSourceLocation: i.logSourceLocation === void 0 ? d.logSourceLocation : i.logSourceLocation } } } o.verbose = function(r, i) { o("verbose", r, i) }, o.info = function(r, i) { o("info", r, i) }, o.warning = function(r, i) { o("warning", r, i) }, o.error = function(r, i) { o("error", r, i) }, o.fatal = function(r, i) { o("fatal", r, i) }, o.reset = t, o.set = e, o.setWithEnv = function(r) { const i = {}; r.logLevel && (i.minimalSeverity = r.logLevel), e("", i) } })(s || (s = {})), n.Logger = s; class p { constructor(t, e, r, i, d, g) { this.category = t, this.name = e, this.startTime = r, this.endCallback = i, this.timer = d, this.ctx = g } end() { return this.endCallback(this) } async checkTimer() { if (this.ctx === void 0 || this.timer === void 0) throw new Error("No webgl timer found"); return this.ctx.endTimer(), this.ctx.waitForQueryAndGetTime(this.timer) } } class l { constructor(t, e, r, i) { this.category = t, this.name = e, this.startTime = r, this.endTime = i } } n.Profiler = class { static create(o) { return o === void 0 ? new this : new this(o.maxNumberEvents, o.flushBatchSize, o.flushIntervalInMilliseconds) } constructor(o, t, e) { this._started = !1, this._flushPointer = 0, this._started = !1, this._maxNumberEvents = o === void 0 ? 1e4 : o, this._flushBatchSize = t === void 0 ? 10 : t, this._flushIntervalInMilliseconds = e === void 0 ? 5e3 : e } start() { this._started = !0, this._timingEvents = [], this._flushTime = (0, n.now)(), this._flushPointer = 0 } stop() { for (this._started = !1; this._flushPointer < this._timingEvents.length; this._flushPointer++) this.logOneEvent(this._timingEvents[this._flushPointer]) } event(o, t, e, r) { const i = this._started ? 
this.begin(o, t, r) : void 0; let d = !1; const g = e(); if (g && typeof g.then == "function") return d = !0, new Promise((m, _) => { g.then(async y => { i && await i.end(), m(y) }, async y => { i && await i.end(), _(y) }) }); if (!d && i) { const m = i.end(); if (m && typeof m.then == "function") return new Promise((_, y) => { m.then(() => { _(g) }, T => { y(T) }) }) } return g } begin(o, t, e) { if (!this._started) throw new Error("profiler is not started yet"); if (e === void 0) { const r = (0, n.now)(); return this.flush(r), new p(o, t, r, i => this.endSync(i)) } { const r = e.beginTimer(); return new p(o, t, 0, async i => this.end(i), r, e) } } async end(o) { const t = await o.checkTimer(); this._timingEvents.length < this._maxNumberEvents && (this._timingEvents.push(new l(o.category, o.name, o.startTime, t)), this.flush(t)) } endSync(o) { const t = (0, n.now)(); this._timingEvents.length < this._maxNumberEvents && (this._timingEvents.push(new l(o.category, o.name, o.startTime, t)), this.flush(t)) } logOneEvent(o) { n.Logger.verbose(`Profiler.${o.category}`, `${(o.endTime-o.startTime).toFixed(2)}ms on event '${o.name}' at ${o.endTime.toFixed(2)}`) } flush(o) { if (this._timingEvents.length - this._flushPointer >= this._flushBatchSize || o - this._flushTime >= this._flushIntervalInMilliseconds) { for (const t = this._flushPointer; this._flushPointer < t + this._flushBatchSize && this._flushPointer < this._timingEvents.length; this._flushPointer++) this.logOneEvent(this._timingEvents[this._flushPointer]); this._flushTime = (0, n.now)() } } get started() { return this._started } }, n.now = typeof performance < "u" && performance.now ? () => performance.now() : Date.now }, 2644: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.Model = void 0; const u = a(5686), c = a(1446), f = a(7070), s = a(9395), h = a(2517); var p = s.onnxruntime.experimental.fbs; n.Model = class { constructor() {} load(l, o, t) { if (!t) try { return void this.loadFromOnnxFormat(l, o) } catch (e) { if (t !== void 0) throw e } this.loadFromOrtFormat(l, o) } loadFromOnnxFormat(l, o) { const t = c.onnx.ModelProto.decode(l); if (h.LongUtil.longToNumber(t.irVersion) < 3) throw new Error("only support ONNX model with IR_VERSION>=3"); this._opsets = t.opsetImport.map(e => ({ domain: e.domain, version: h.LongUtil.longToNumber(e.version) })), this._graph = f.Graph.from(t.graph, o) } loadFromOrtFormat(l, o) { const t = new u.flatbuffers.ByteBuffer(l), e = p.InferenceSession.getRootAsInferenceSession(t).model(); if (h.LongUtil.longToNumber(e.irVersion()) < 3) throw new Error("only support ONNX model with IR_VERSION>=3"); this._opsets = []; for (let r = 0; r < e.opsetImportLength(); r++) { const i = e.opsetImport(r); this._opsets.push({ domain: i?.domain(), version: h.LongUtil.longToNumber(i.version()) }) } this._graph = f.Graph.from(e.graph(), o) } get graph() { return this._graph } get opsets() { return this._opsets } } }, 782: (b, n) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.FLOAT_TYPES = n.INT_TYPES = n.NUMBER_TYPES = void 0, n.NUMBER_TYPES = ["float32", "float64", "int32", "int16", "int8", "uint16", "uint32", "uint8"], n.INT_TYPES = ["int32", "int16", "int8", "uint16", "uint32", "uint8"], n.FLOAT_TYPES = ["float32", "float64"] }, 1047: (b, n) => { function a(u, c) { if (c.endsWith("+")) { const f = Number.parseInt(c.substring(0, c.length - 1), 10); return !isNaN(f) && f <= u } if (c.split("-").length === 2) { const f = c.split("-"), s = Number.parseInt(f[0], 10), h = 
Number.parseInt(f[1], 10); return !isNaN(s) && !isNaN(h) && s <= u && u <= h } return Number.parseInt(c, 10) === u } Object.defineProperty(n, "__esModule", { value: !0 }), n.resolveOperator = void 0, n.resolveOperator = function(u, c, f) { for (const s of f) { const h = s[0], p = s[1], l = s[2], o = s[3], t = s[4]; if (u.opType === h) { for (const e of c) if ((e.domain === p || e.domain === "ai.onnx" && p === "") && a(e.version, l)) return { opImpl: o, opInit: t } } } throw new TypeError(`cannot resolve operator '${u.opType}' with opsets: ${c.map(s=>`${s.domain||"ai.onnx"} v${s.version}`).join(", ")}`) } }, 9395: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.onnxruntime = void 0; const u = a(5686); var c, f; c = n.onnxruntime || (n.onnxruntime = {}), function(s) { (function(h) { h[h.UNDEFINED = 0] = "UNDEFINED", h[h.FLOAT = 1] = "FLOAT", h[h.INT = 2] = "INT", h[h.STRING = 3] = "STRING", h[h.TENSOR = 4] = "TENSOR", h[h.GRAPH = 5] = "GRAPH", h[h.FLOATS = 6] = "FLOATS", h[h.INTS = 7] = "INTS", h[h.STRINGS = 8] = "STRINGS", h[h.TENSORS = 9] = "TENSORS", h[h.GRAPHS = 10] = "GRAPHS", h[h.SPARSE_TENSOR = 11] = "SPARSE_TENSOR", h[h.SPARSE_TENSORS = 12] = "SPARSE_TENSORS" })(s.AttributeType || (s.AttributeType = {})) }((f = c.experimental || (c.experimental = {})).fbs || (f.fbs = {})), function(s) { (function(h) { (function(p) { (function(l) { l[l.UNKNOWN = 0] = "UNKNOWN", l[l.VALUE = 1] = "VALUE", l[l.PARAM = 2] = "PARAM" })(p.DimensionValueType || (p.DimensionValueType = {})) })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { (function(l) { l[l.UNDEFINED = 0] = "UNDEFINED", l[l.FLOAT = 1] = "FLOAT", l[l.UINT8 = 2] = "UINT8", l[l.INT8 = 3] = "INT8", l[l.UINT16 = 4] = "UINT16", l[l.INT16 = 5] = "INT16", l[l.INT32 = 6] = "INT32", l[l.INT64 = 7] = "INT64", l[l.STRING = 8] = "STRING", l[l.BOOL = 9] = "BOOL", l[l.FLOAT16 = 10] = "FLOAT16", l[l.DOUBLE = 11] = "DOUBLE", l[l.UINT32 = 12] = "UINT32", l[l.UINT64 = 13] = "UINT64", l[l.COMPLEX64 = 14] = "COMPLEX64", l[l.COMPLEX128 = 15] = "COMPLEX128", l[l.BFLOAT16 = 16] = "BFLOAT16" })(p.TensorDataType || (p.TensorDataType = {})) })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { (function(l) { l[l.Primitive = 0] = "Primitive", l[l.Fused = 1] = "Fused" })(p.NodeType || (p.NodeType = {})) })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { (function(l) { l[l.NONE = 0] = "NONE", l[l.tensor_type = 1] = "tensor_type", l[l.sequence_type = 2] = "sequence_type", l[l.map_type = 3] = "map_type" })(p.TypeInfoValue || (p.TypeInfoValue = {})) })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsShape(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsShape(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } dim(t, e) { let r = this.bb.__offset(this.bb_pos, 4); return r ? 
(e || new s.experimental.fbs.Dimension).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } dimLength() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } static startShape(t) { t.startObject(1) } static addDim(t, e) { t.addFieldOffset(0, e, 0) } static createDimVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startDimVector(t, e) { t.startVector(4, e, 4) } static endShape(t) { return t.endObject() } static createShape(t, e) { return l.startShape(t), l.addDim(t, e), l.endShape(t) } } p.Shape = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsDimension(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsDimension(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } value(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? (t || new s.experimental.fbs.DimensionValue).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } denotation(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? this.bb.__string(this.bb_pos + e, t) : null } static startDimension(t) { t.startObject(2) } static addValue(t, e) { t.addFieldOffset(0, e, 0) } static addDenotation(t, e) { t.addFieldOffset(1, e, 0) } static endDimension(t) { return t.endObject() } static createDimension(t, e, r) { return l.startDimension(t), l.addValue(t, e), l.addDenotation(t, r), l.endDimension(t) } } p.Dimension = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsDimensionValue(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsDimensionValue(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } dimType() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.readInt8(this.bb_pos + t) : s.experimental.fbs.DimensionValueType.UNKNOWN } dimValue() { let t = this.bb.__offset(this.bb_pos, 6); return t ? this.bb.readInt64(this.bb_pos + t) : this.bb.createLong(0, 0) } dimParam(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? 
this.bb.__string(this.bb_pos + e, t) : null } static startDimensionValue(t) { t.startObject(3) } static addDimType(t, e) { t.addFieldInt8(0, e, s.experimental.fbs.DimensionValueType.UNKNOWN) } static addDimValue(t, e) { t.addFieldInt64(1, e, t.createLong(0, 0)) } static addDimParam(t, e) { t.addFieldOffset(2, e, 0) } static endDimensionValue(t) { return t.endObject() } static createDimensionValue(t, e, r, i) { return l.startDimensionValue(t), l.addDimType(t, e), l.addDimValue(t, r), l.addDimParam(t, i), l.endDimensionValue(t) } } p.DimensionValue = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsTensorTypeAndShape(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsTensorTypeAndShape(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } elemType() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.readInt32(this.bb_pos + t) : s.experimental.fbs.TensorDataType.UNDEFINED } shape(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? (t || new s.experimental.fbs.Shape).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } static startTensorTypeAndShape(t) { t.startObject(2) } static addElemType(t, e) { t.addFieldInt32(0, e, s.experimental.fbs.TensorDataType.UNDEFINED) } static addShape(t, e) { t.addFieldOffset(1, e, 0) } static endTensorTypeAndShape(t) { return t.endObject() } static createTensorTypeAndShape(t, e, r) { return l.startTensorTypeAndShape(t), l.addElemType(t, e), l.addShape(t, r), l.endTensorTypeAndShape(t) } } p.TensorTypeAndShape = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsMapType(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsMapType(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } keyType() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.readInt32(this.bb_pos + t) : s.experimental.fbs.TensorDataType.UNDEFINED } valueType(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? 
(t || new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } static startMapType(t) { t.startObject(2) } static addKeyType(t, e) { t.addFieldInt32(0, e, s.experimental.fbs.TensorDataType.UNDEFINED) } static addValueType(t, e) { t.addFieldOffset(1, e, 0) } static endMapType(t) { return t.endObject() } static createMapType(t, e, r) { return l.startMapType(t), l.addKeyType(t, e), l.addValueType(t, r), l.endMapType(t) } } p.MapType = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsSequenceType(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsSequenceType(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } elemType(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? (t || new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } static startSequenceType(t) { t.startObject(1) } static addElemType(t, e) { t.addFieldOffset(0, e, 0) } static endSequenceType(t) { return t.endObject() } static createSequenceType(t, e) { return l.startSequenceType(t), l.addElemType(t, e), l.endSequenceType(t) } } p.SequenceType = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (h.fbs || (h.fbs = {})).EdgeEnd = class { constructor() { this.bb = null, this.bb_pos = 0 } __init(p, l) { return this.bb_pos = p, this.bb = l, this } nodeIndex() { return this.bb.readUint32(this.bb_pos) } srcArgIndex() { return this.bb.readInt32(this.bb_pos + 4) } dstArgIndex() { return this.bb.readInt32(this.bb_pos + 8) } static createEdgeEnd(p, l, o, t) { return p.prep(4, 12), p.writeInt32(t), p.writeInt32(o), p.writeInt32(l), p.offset() } } })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsNodeEdge(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsNodeEdge(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } nodeIndex() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.readUint32(this.bb_pos + t) : 0 } inputEdges(t, e) { let r = this.bb.__offset(this.bb_pos, 6); return r ? (e || new s.experimental.fbs.EdgeEnd).__init(this.bb.__vector(this.bb_pos + r) + 12 * t, this.bb) : null } inputEdgesLength() { let t = this.bb.__offset(this.bb_pos, 6); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } outputEdges(t, e) { let r = this.bb.__offset(this.bb_pos, 8); return r ? (e || new s.experimental.fbs.EdgeEnd).__init(this.bb.__vector(this.bb_pos + r) + 12 * t, this.bb) : null } outputEdgesLength() { let t = this.bb.__offset(this.bb_pos, 8); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } static startNodeEdge(t) { t.startObject(3) } static addNodeIndex(t, e) { t.addFieldInt32(0, e, 0) } static addInputEdges(t, e) { t.addFieldOffset(1, e, 0) } static startInputEdgesVector(t, e) { t.startVector(12, e, 4) } static addOutputEdges(t, e) { t.addFieldOffset(2, e, 0) } static startOutputEdgesVector(t, e) { t.startVector(12, e, 4) } static endNodeEdge(t) { return t.endObject() } static createNodeEdge(t, e, r, i) { return l.startNodeEdge(t), l.addNodeIndex(t, e), l.addInputEdges(t, r), l.addOutputEdges(t, i), l.endNodeEdge(t) } } p.NodeEdge = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsNode(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsNode(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } name(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } docString(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? this.bb.__string(this.bb_pos + e, t) : null } domain(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? this.bb.__string(this.bb_pos + e, t) : null } sinceVersion() { let t = this.bb.__offset(this.bb_pos, 10); return t ? this.bb.readInt32(this.bb_pos + t) : 0 } index() { let t = this.bb.__offset(this.bb_pos, 12); return t ? this.bb.readUint32(this.bb_pos + t) : 0 } opType(t) { let e = this.bb.__offset(this.bb_pos, 14); return e ? this.bb.__string(this.bb_pos + e, t) : null } type() { let t = this.bb.__offset(this.bb_pos, 16); return t ? this.bb.readInt32(this.bb_pos + t) : s.experimental.fbs.NodeType.Primitive } executionProviderType(t) { let e = this.bb.__offset(this.bb_pos, 18); return e ? this.bb.__string(this.bb_pos + e, t) : null } inputs(t, e) { let r = this.bb.__offset(this.bb_pos, 20); return r ? this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } inputsLength() { let t = this.bb.__offset(this.bb_pos, 20); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } outputs(t, e) { let r = this.bb.__offset(this.bb_pos, 22); return r ? this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } outputsLength() { let t = this.bb.__offset(this.bb_pos, 22); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } attributes(t, e) { let r = this.bb.__offset(this.bb_pos, 24); return r ? (e || new s.experimental.fbs.Attribute).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } attributesLength() { let t = this.bb.__offset(this.bb_pos, 24); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } inputArgCounts(t) { let e = this.bb.__offset(this.bb_pos, 26); return e ? this.bb.readInt32(this.bb.__vector(this.bb_pos + e) + 4 * t) : 0 } inputArgCountsLength() { let t = this.bb.__offset(this.bb_pos, 26); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } inputArgCountsArray() { let t = this.bb.__offset(this.bb_pos, 26); return t ? new Int32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + t), this.bb.__vector_len(this.bb_pos + t)) : null } implicitInputs(t, e) { let r = this.bb.__offset(this.bb_pos, 28); return r ? 
this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } implicitInputsLength() { let t = this.bb.__offset(this.bb_pos, 28); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } static startNode(t) { t.startObject(13) } static addName(t, e) { t.addFieldOffset(0, e, 0) } static addDocString(t, e) { t.addFieldOffset(1, e, 0) } static addDomain(t, e) { t.addFieldOffset(2, e, 0) } static addSinceVersion(t, e) { t.addFieldInt32(3, e, 0) } static addIndex(t, e) { t.addFieldInt32(4, e, 0) } static addOpType(t, e) { t.addFieldOffset(5, e, 0) } static addType(t, e) { t.addFieldInt32(6, e, s.experimental.fbs.NodeType.Primitive) } static addExecutionProviderType(t, e) { t.addFieldOffset(7, e, 0) } static addInputs(t, e) { t.addFieldOffset(8, e, 0) } static createInputsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startInputsVector(t, e) { t.startVector(4, e, 4) } static addOutputs(t, e) { t.addFieldOffset(9, e, 0) } static createOutputsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startOutputsVector(t, e) { t.startVector(4, e, 4) } static addAttributes(t, e) { t.addFieldOffset(10, e, 0) } static createAttributesVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startAttributesVector(t, e) { t.startVector(4, e, 4) } static addInputArgCounts(t, e) { t.addFieldOffset(11, e, 0) } static createInputArgCountsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addInt32(e[r]); return t.endVector() } static startInputArgCountsVector(t, e) { t.startVector(4, e, 4) } static addImplicitInputs(t, e) { t.addFieldOffset(12, e, 0) } static createImplicitInputsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startImplicitInputsVector(t, e) { t.startVector(4, e, 4) } static endNode(t) { return t.endObject() } static createNode(t, e, r, i, d, g, m, _, y, T, w, S, O, E) { return l.startNode(t), l.addName(t, e), l.addDocString(t, r), l.addDomain(t, i), l.addSinceVersion(t, d), l.addIndex(t, g), l.addOpType(t, m), l.addType(t, _), l.addExecutionProviderType(t, y), l.addInputs(t, T), l.addOutputs(t, w), l.addAttributes(t, S), l.addInputArgCounts(t, O), l.addImplicitInputs(t, E), l.endNode(t) } } p.Node = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsValueInfo(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsValueInfo(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } name(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } docString(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? this.bb.__string(this.bb_pos + e, t) : null } type(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? 
(t || new s.experimental.fbs.TypeInfo).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } static startValueInfo(t) { t.startObject(3) } static addName(t, e) { t.addFieldOffset(0, e, 0) } static addDocString(t, e) { t.addFieldOffset(1, e, 0) } static addType(t, e) { t.addFieldOffset(2, e, 0) } static endValueInfo(t) { return t.endObject() } static createValueInfo(t, e, r, i) { return l.startValueInfo(t), l.addName(t, e), l.addDocString(t, r), l.addType(t, i), l.endValueInfo(t) } } p.ValueInfo = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsTypeInfo(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsTypeInfo(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } denotation(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } valueType() { let t = this.bb.__offset(this.bb_pos, 6); return t ? this.bb.readUint8(this.bb_pos + t) : s.experimental.fbs.TypeInfoValue.NONE } value(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? this.bb.__union(t, this.bb_pos + e) : null } static startTypeInfo(t) { t.startObject(3) } static addDenotation(t, e) { t.addFieldOffset(0, e, 0) } static addValueType(t, e) { t.addFieldInt8(1, e, s.experimental.fbs.TypeInfoValue.NONE) } static addValue(t, e) { t.addFieldOffset(2, e, 0) } static endTypeInfo(t) { return t.endObject() } static createTypeInfo(t, e, r, i) { return l.startTypeInfo(t), l.addDenotation(t, e), l.addValueType(t, r), l.addValue(t, i), l.endTypeInfo(t) } } p.TypeInfo = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsOperatorSetId(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsOperatorSetId(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } domain(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } version() { let t = this.bb.__offset(this.bb_pos, 6); return t ? 
this.bb.readInt64(this.bb_pos + t) : this.bb.createLong(0, 0) } static startOperatorSetId(t) { t.startObject(2) } static addDomain(t, e) { t.addFieldOffset(0, e, 0) } static addVersion(t, e) { t.addFieldInt64(1, e, t.createLong(0, 0)) } static endOperatorSetId(t) { return t.endObject() } static createOperatorSetId(t, e, r) { return l.startOperatorSetId(t), l.addDomain(t, e), l.addVersion(t, r), l.endOperatorSetId(t) } } p.OperatorSetId = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsTensor(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsTensor(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } name(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } docString(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? this.bb.__string(this.bb_pos + e, t) : null } dims(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? this.bb.readInt64(this.bb.__vector(this.bb_pos + e) + 8 * t) : this.bb.createLong(0, 0) } dimsLength() { let t = this.bb.__offset(this.bb_pos, 8); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } dataType() { let t = this.bb.__offset(this.bb_pos, 10); return t ? this.bb.readInt32(this.bb_pos + t) : s.experimental.fbs.TensorDataType.UNDEFINED } rawData(t) { let e = this.bb.__offset(this.bb_pos, 12); return e ? this.bb.readUint8(this.bb.__vector(this.bb_pos + e) + t) : 0 } rawDataLength() { let t = this.bb.__offset(this.bb_pos, 12); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } rawDataArray() { let t = this.bb.__offset(this.bb_pos, 12); return t ? new Uint8Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + t), this.bb.__vector_len(this.bb_pos + t)) : null } stringData(t, e) { let r = this.bb.__offset(this.bb_pos, 14); return r ? this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } stringDataLength() { let t = this.bb.__offset(this.bb_pos, 14); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } static startTensor(t) { t.startObject(6) } static addName(t, e) { t.addFieldOffset(0, e, 0) } static addDocString(t, e) { t.addFieldOffset(1, e, 0) } static addDims(t, e) { t.addFieldOffset(2, e, 0) } static createDimsVector(t, e) { t.startVector(8, e.length, 8); for (let r = e.length - 1; r >= 0; r--) t.addInt64(e[r]); return t.endVector() } static startDimsVector(t, e) { t.startVector(8, e, 8) } static addDataType(t, e) { t.addFieldInt32(3, e, s.experimental.fbs.TensorDataType.UNDEFINED) } static addRawData(t, e) { t.addFieldOffset(4, e, 0) } static createRawDataVector(t, e) { t.startVector(1, e.length, 1); for (let r = e.length - 1; r >= 0; r--) t.addInt8(e[r]); return t.endVector() } static startRawDataVector(t, e) { t.startVector(1, e, 1) } static addStringData(t, e) { t.addFieldOffset(5, e, 0) } static createStringDataVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startStringDataVector(t, e) { t.startVector(4, e, 4) } static endTensor(t) { return t.endObject() } static createTensor(t, e, r, i, d, g, m) { return l.startTensor(t), l.addName(t, e), l.addDocString(t, r), l.addDims(t, i), l.addDataType(t, d), l.addRawData(t, g), l.addStringData(t, m), l.endTensor(t) } } p.Tensor = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsSparseTensor(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsSparseTensor(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } values(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? (t || new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } indices(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? (t || new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } dims(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? this.bb.readInt64(this.bb.__vector(this.bb_pos + e) + 8 * t) : this.bb.createLong(0, 0) } dimsLength() { let t = this.bb.__offset(this.bb_pos, 8); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } static startSparseTensor(t) { t.startObject(3) } static addValues(t, e) { t.addFieldOffset(0, e, 0) } static addIndices(t, e) { t.addFieldOffset(1, e, 0) } static addDims(t, e) { t.addFieldOffset(2, e, 0) } static createDimsVector(t, e) { t.startVector(8, e.length, 8); for (let r = e.length - 1; r >= 0; r--) t.addInt64(e[r]); return t.endVector() } static startDimsVector(t, e) { t.startVector(8, e, 8) } static endSparseTensor(t) { return t.endObject() } static createSparseTensor(t, e, r, i) { return l.startSparseTensor(t), l.addValues(t, e), l.addIndices(t, r), l.addDims(t, i), l.endSparseTensor(t) } } p.SparseTensor = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsAttribute(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsAttribute(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } name(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } docString(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? this.bb.__string(this.bb_pos + e, t) : null } type() { let t = this.bb.__offset(this.bb_pos, 8); return t ? this.bb.readInt32(this.bb_pos + t) : s.experimental.fbs.AttributeType.UNDEFINED } f() { let t = this.bb.__offset(this.bb_pos, 10); return t ? this.bb.readFloat32(this.bb_pos + t) : 0 } i() { let t = this.bb.__offset(this.bb_pos, 12); return t ? this.bb.readInt64(this.bb_pos + t) : this.bb.createLong(0, 0) } s(t) { let e = this.bb.__offset(this.bb_pos, 14); return e ? this.bb.__string(this.bb_pos + e, t) : null } t(t) { let e = this.bb.__offset(this.bb_pos, 16); return e ? (t || new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } g(t) { let e = this.bb.__offset(this.bb_pos, 18); return e ? (t || new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } floats(t) { let e = this.bb.__offset(this.bb_pos, 20); return e ? this.bb.readFloat32(this.bb.__vector(this.bb_pos + e) + 4 * t) : 0 } floatsLength() { let t = this.bb.__offset(this.bb_pos, 20); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } floatsArray() { let t = this.bb.__offset(this.bb_pos, 20); return t ? new Float32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + t), this.bb.__vector_len(this.bb_pos + t)) : null } ints(t) { let e = this.bb.__offset(this.bb_pos, 22); return e ? this.bb.readInt64(this.bb.__vector(this.bb_pos + e) + 8 * t) : this.bb.createLong(0, 0) } intsLength() { let t = this.bb.__offset(this.bb_pos, 22); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } strings(t, e) { let r = this.bb.__offset(this.bb_pos, 24); return r ? this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } stringsLength() { let t = this.bb.__offset(this.bb_pos, 24); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } tensors(t, e) { let r = this.bb.__offset(this.bb_pos, 26); return r ? (e || new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } tensorsLength() { let t = this.bb.__offset(this.bb_pos, 26); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } graphs(t, e) { let r = this.bb.__offset(this.bb_pos, 28); return r ? (e || new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } graphsLength() { let t = this.bb.__offset(this.bb_pos, 28); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } static startAttribute(t) { t.startObject(13) } static addName(t, e) { t.addFieldOffset(0, e, 0) } static addDocString(t, e) { t.addFieldOffset(1, e, 0) } static addType(t, e) { t.addFieldInt32(2, e, s.experimental.fbs.AttributeType.UNDEFINED) } static addF(t, e) { t.addFieldFloat32(3, e, 0) } static addI(t, e) { t.addFieldInt64(4, e, t.createLong(0, 0)) } static addS(t, e) { t.addFieldOffset(5, e, 0) } static addT(t, e) { t.addFieldOffset(6, e, 0) } static addG(t, e) { t.addFieldOffset(7, e, 0) } static addFloats(t, e) { t.addFieldOffset(8, e, 0) } static createFloatsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addFloat32(e[r]); return t.endVector() } static startFloatsVector(t, e) { t.startVector(4, e, 4) } static addInts(t, e) { t.addFieldOffset(9, e, 0) } static createIntsVector(t, e) { t.startVector(8, e.length, 8); for (let r = e.length - 1; r >= 0; r--) t.addInt64(e[r]); return t.endVector() } static startIntsVector(t, e) { t.startVector(8, e, 8) } static addStrings(t, e) { t.addFieldOffset(10, e, 0) } static createStringsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startStringsVector(t, e) { t.startVector(4, e, 4) } static addTensors(t, e) { t.addFieldOffset(11, e, 0) } static createTensorsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startTensorsVector(t, e) { t.startVector(4, e, 4) } static addGraphs(t, e) { t.addFieldOffset(12, e, 0) } static createGraphsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startGraphsVector(t, e) { t.startVector(4, e, 4) } static endAttribute(t) { return t.endObject() } static createAttribute(t, e, r, i, d, g, m, _, y, T, w, S, O, E) { return l.startAttribute(t), l.addName(t, e), l.addDocString(t, r), l.addType(t, i), l.addF(t, d), l.addI(t, g), l.addS(t, m), l.addT(t, _), l.addG(t, y), l.addFloats(t, T), l.addInts(t, w), l.addStrings(t, S), l.addTensors(t, O), l.addGraphs(t, E), l.endAttribute(t) } } p.Attribute = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsGraph(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsGraph(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } initializers(t, e) { let r = this.bb.__offset(this.bb_pos, 4); return r ? (e || new s.experimental.fbs.Tensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } initializersLength() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } nodeArgs(t, e) { let r = this.bb.__offset(this.bb_pos, 6); return r ? 
(e || new s.experimental.fbs.ValueInfo).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } nodeArgsLength() { let t = this.bb.__offset(this.bb_pos, 6); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } nodes(t, e) { let r = this.bb.__offset(this.bb_pos, 8); return r ? (e || new s.experimental.fbs.Node).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } nodesLength() { let t = this.bb.__offset(this.bb_pos, 8); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } maxNodeIndex() { let t = this.bb.__offset(this.bb_pos, 10); return t ? this.bb.readUint32(this.bb_pos + t) : 0 } nodeEdges(t, e) { let r = this.bb.__offset(this.bb_pos, 12); return r ? (e || new s.experimental.fbs.NodeEdge).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } nodeEdgesLength() { let t = this.bb.__offset(this.bb_pos, 12); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } inputs(t, e) { let r = this.bb.__offset(this.bb_pos, 14); return r ? this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } inputsLength() { let t = this.bb.__offset(this.bb_pos, 14); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } outputs(t, e) { let r = this.bb.__offset(this.bb_pos, 16); return r ? this.bb.__string(this.bb.__vector(this.bb_pos + r) + 4 * t, e) : null } outputsLength() { let t = this.bb.__offset(this.bb_pos, 16); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } sparseInitializers(t, e) { let r = this.bb.__offset(this.bb_pos, 18); return r ? (e || new s.experimental.fbs.SparseTensor).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } sparseInitializersLength() { let t = this.bb.__offset(this.bb_pos, 18); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } static startGraph(t) { t.startObject(8) } static addInitializers(t, e) { t.addFieldOffset(0, e, 0) } static createInitializersVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startInitializersVector(t, e) { t.startVector(4, e, 4) } static addNodeArgs(t, e) { t.addFieldOffset(1, e, 0) } static createNodeArgsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startNodeArgsVector(t, e) { t.startVector(4, e, 4) } static addNodes(t, e) { t.addFieldOffset(2, e, 0) } static createNodesVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startNodesVector(t, e) { t.startVector(4, e, 4) } static addMaxNodeIndex(t, e) { t.addFieldInt32(3, e, 0) } static addNodeEdges(t, e) { t.addFieldOffset(4, e, 0) } static createNodeEdgesVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startNodeEdgesVector(t, e) { t.startVector(4, e, 4) } static addInputs(t, e) { t.addFieldOffset(5, e, 0) } static createInputsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startInputsVector(t, e) { t.startVector(4, e, 4) } static addOutputs(t, e) { t.addFieldOffset(6, e, 0) } static createOutputsVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startOutputsVector(t, e) { t.startVector(4, e, 4) } static addSparseInitializers(t, e) { t.addFieldOffset(7, e, 0) } static createSparseInitializersVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startSparseInitializersVector(t, e) { t.startVector(4, e, 4) } static endGraph(t) { return t.endObject() } static createGraph(t, e, r, i, d, g, m, _, y) { return l.startGraph(t), l.addInitializers(t, e), l.addNodeArgs(t, r), l.addNodes(t, i), l.addMaxNodeIndex(t, d), l.addNodeEdges(t, g), l.addInputs(t, m), l.addOutputs(t, _), l.addSparseInitializers(t, y), l.endGraph(t) } } p.Graph = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsModel(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsModel(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } irVersion() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.readInt64(this.bb_pos + t) : this.bb.createLong(0, 0) } opsetImport(t, e) { let r = this.bb.__offset(this.bb_pos, 6); return r ? (e || new s.experimental.fbs.OperatorSetId).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } opsetImportLength() { let t = this.bb.__offset(this.bb_pos, 6); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } producerName(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? this.bb.__string(this.bb_pos + e, t) : null } producerVersion(t) { let e = this.bb.__offset(this.bb_pos, 10); return e ? 
this.bb.__string(this.bb_pos + e, t) : null } domain(t) { let e = this.bb.__offset(this.bb_pos, 12); return e ? this.bb.__string(this.bb_pos + e, t) : null } modelVersion() { let t = this.bb.__offset(this.bb_pos, 14); return t ? this.bb.readInt64(this.bb_pos + t) : this.bb.createLong(0, 0) } docString(t) { let e = this.bb.__offset(this.bb_pos, 16); return e ? this.bb.__string(this.bb_pos + e, t) : null } graph(t) { let e = this.bb.__offset(this.bb_pos, 18); return e ? (t || new s.experimental.fbs.Graph).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } graphDocString(t) { let e = this.bb.__offset(this.bb_pos, 20); return e ? this.bb.__string(this.bb_pos + e, t) : null } static startModel(t) { t.startObject(9) } static addIrVersion(t, e) { t.addFieldInt64(0, e, t.createLong(0, 0)) } static addOpsetImport(t, e) { t.addFieldOffset(1, e, 0) } static createOpsetImportVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startOpsetImportVector(t, e) { t.startVector(4, e, 4) } static addProducerName(t, e) { t.addFieldOffset(2, e, 0) } static addProducerVersion(t, e) { t.addFieldOffset(3, e, 0) } static addDomain(t, e) { t.addFieldOffset(4, e, 0) } static addModelVersion(t, e) { t.addFieldInt64(5, e, t.createLong(0, 0)) } static addDocString(t, e) { t.addFieldOffset(6, e, 0) } static addGraph(t, e) { t.addFieldOffset(7, e, 0) } static addGraphDocString(t, e) { t.addFieldOffset(8, e, 0) } static endModel(t) { return t.endObject() } static createModel(t, e, r, i, d, g, m, _, y, T) { return l.startModel(t), l.addIrVersion(t, e), l.addOpsetImport(t, r), l.addProducerName(t, i), l.addProducerVersion(t, d), l.addDomain(t, g), l.addModelVersion(t, m), l.addDocString(t, _), l.addGraph(t, y), l.addGraphDocString(t, T), l.endModel(t) } } p.Model = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsKernelCreateInfos(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsKernelCreateInfos(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } nodeIndices(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.readUint32(this.bb.__vector(this.bb_pos + e) + 4 * t) : 0 } nodeIndicesLength() { let t = this.bb.__offset(this.bb_pos, 4); return t ? this.bb.__vector_len(this.bb_pos + t) : 0 } nodeIndicesArray() { let t = this.bb.__offset(this.bb_pos, 4); return t ? new Uint32Array(this.bb.bytes().buffer, this.bb.bytes().byteOffset + this.bb.__vector(this.bb_pos + t), this.bb.__vector_len(this.bb_pos + t)) : null } kernelDefHashes(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? this.bb.readUint64(this.bb.__vector(this.bb_pos + e) + 8 * t) : this.bb.createLong(0, 0) } kernelDefHashesLength() { let t = this.bb.__offset(this.bb_pos, 6); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } static startKernelCreateInfos(t) { t.startObject(2) } static addNodeIndices(t, e) { t.addFieldOffset(0, e, 0) } static createNodeIndicesVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addInt32(e[r]); return t.endVector() } static startNodeIndicesVector(t, e) { t.startVector(4, e, 4) } static addKernelDefHashes(t, e) { t.addFieldOffset(1, e, 0) } static createKernelDefHashesVector(t, e) { t.startVector(8, e.length, 8); for (let r = e.length - 1; r >= 0; r--) t.addInt64(e[r]); return t.endVector() } static startKernelDefHashesVector(t, e) { t.startVector(8, e, 8) } static endKernelCreateInfos(t) { return t.endObject() } static createKernelCreateInfos(t, e, r) { return l.startKernelCreateInfos(t), l.addNodeIndices(t, e), l.addKernelDefHashes(t, r), l.endKernelCreateInfos(t) } } p.KernelCreateInfos = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsSubGraphSessionState(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsSubGraphSessionState(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } graphId(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } sessionState(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? (t || new s.experimental.fbs.SessionState).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } static startSubGraphSessionState(t) { t.startObject(2) } static addGraphId(t, e) { t.addFieldOffset(0, e, 0) } static addSessionState(t, e) { t.addFieldOffset(1, e, 0) } static endSubGraphSessionState(t) { let e = t.endObject(); return t.requiredField(e, 4), e } static createSubGraphSessionState(t, e, r) { return l.startSubGraphSessionState(t), l.addGraphId(t, e), l.addSessionState(t, r), l.endSubGraphSessionState(t) } } p.SubGraphSessionState = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsSessionState(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsSessionState(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } kernels(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? (t || new s.experimental.fbs.KernelCreateInfos).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } subGraphSessionStates(t, e) { let r = this.bb.__offset(this.bb_pos, 6); return r ? (e || new s.experimental.fbs.SubGraphSessionState).__init(this.bb.__indirect(this.bb.__vector(this.bb_pos + r) + 4 * t), this.bb) : null } subGraphSessionStatesLength() { let t = this.bb.__offset(this.bb_pos, 6); return t ? 
this.bb.__vector_len(this.bb_pos + t) : 0 } static startSessionState(t) { t.startObject(2) } static addKernels(t, e) { t.addFieldOffset(0, e, 0) } static addSubGraphSessionStates(t, e) { t.addFieldOffset(1, e, 0) } static createSubGraphSessionStatesVector(t, e) { t.startVector(4, e.length, 4); for (let r = e.length - 1; r >= 0; r--) t.addOffset(e[r]); return t.endVector() } static startSubGraphSessionStatesVector(t, e) { t.startVector(4, e, 4) } static endSessionState(t) { return t.endObject() } static createSessionState(t, e, r) { return l.startSessionState(t), l.addKernels(t, e), l.addSubGraphSessionStates(t, r), l.endSessionState(t) } } p.SessionState = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})), function(s) { (function(h) { (function(p) { class l { constructor() { this.bb = null, this.bb_pos = 0 } __init(t, e) { return this.bb_pos = t, this.bb = e, this } static getRootAsInferenceSession(t, e) { return (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static getSizePrefixedRootAsInferenceSession(t, e) { return t.setPosition(t.position() + u.flatbuffers.SIZE_PREFIX_LENGTH), (e || new l).__init(t.readInt32(t.position()) + t.position(), t) } static bufferHasIdentifier(t) { return t.__has_identifier("ORTM") } ortVersion(t) { let e = this.bb.__offset(this.bb_pos, 4); return e ? this.bb.__string(this.bb_pos + e, t) : null } model(t) { let e = this.bb.__offset(this.bb_pos, 6); return e ? (t || new s.experimental.fbs.Model).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } sessionState(t) { let e = this.bb.__offset(this.bb_pos, 8); return e ? (t || new s.experimental.fbs.SessionState).__init(this.bb.__indirect(this.bb_pos + e), this.bb) : null } static startInferenceSession(t) { t.startObject(3) } static addOrtVersion(t, e) { t.addFieldOffset(0, e, 0) } static addModel(t, e) { t.addFieldOffset(1, e, 0) } static addSessionState(t, e) { t.addFieldOffset(2, e, 0) } static endInferenceSession(t) { return t.endObject() } static finishInferenceSessionBuffer(t, e) { t.finish(e, "ORTM") } static finishSizePrefixedInferenceSessionBuffer(t, e) { t.finish(e, "ORTM", !0) } static createInferenceSession(t, e, r, i) { return l.startInferenceSession(t), l.addOrtVersion(t, e), l.addModel(t, r), l.addSessionState(t, i), l.endInferenceSession(t) } } p.InferenceSession = l })(h.fbs || (h.fbs = {})) })(s.experimental || (s.experimental = {})) }(n.onnxruntime || (n.onnxruntime = {})) }, 7448: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.OnnxjsSessionHandler = void 0; const u = a(1670), c = a(9162); n.OnnxjsSessionHandler = class { constructor(f) { this.session = f, this.inputNames = this.session.inputNames, this.outputNames = this.session.outputNames } async dispose() {} async run(f, s, h) { const p = new Map; for (const t in f) if (Object.hasOwnProperty.call(f, t)) { const e = f[t]; p.set(t, new c.Tensor(e.dims, e.type, void 0, void 0, e.data)) } const l = await this.session.run(p), o = {}; return l.forEach((t, e) => { o[e] = new u.Tensor(t.type, t.data, t.dims) }), o } startProfiling() { this.session.startProfiling() } endProfiling() { this.session.endProfiling() } } }, 6919: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.Session = void 0; const u = a(7067), c = a(1296), f = a(7091), s = a(1036), h = a(6231), p = a(2644); n.Session = class { constructor(l = {}) { this._initialized = !1, this.backendHint = l.backendHint, this.profiler = 
h.Profiler.create(l.profiler), this.context = { profiler: this.profiler, graphInputTypes: [], graphInputDims: [] } } get inputNames() { return this._model.graph.getInputNames() } get outputNames() { return this._model.graph.getOutputNames() } startProfiling() { this.profiler.start() } endProfiling() { this.profiler.stop() } async loadModel(l, o, t) { await this.profiler.event("session", "Session.loadModel", async () => { const e = await (0, f.resolveBackend)(this.backendHint); if (this.sessionHandler = e.createSessionHandler(this.context), this._model = new p.Model, typeof l == "string") { const r = l.endsWith(".ort"); if (typeof fetch > "u") { const i = await (0, c.promisify)(u.readFile)(l); this.initialize(i, r) } else { const i = await fetch(l), d = await i.arrayBuffer(); this.initialize(new Uint8Array(d), r) } } else if (ArrayBuffer.isView(l)) this.initialize(l); else { const r = new Uint8Array(l, o || 0, t || l.byteLength); this.initialize(r) } }) } initialize(l, o) { if (this._initialized) throw new Error("already initialized"); this.profiler.event("session", "Session.initialize", () => { const t = this.sessionHandler.transformGraph ? this.sessionHandler : void 0; this._model.load(l, t, o), this.sessionHandler.onGraphInitialized && this.sessionHandler.onGraphInitialized(this._model.graph), this.initializeOps(this._model.graph), this._executionPlan = new s.ExecutionPlan(this._model.graph, this._ops, this.profiler) }), this._initialized = !0 } async run(l) { if (!this._initialized) throw new Error("session not initialized yet"); return this.profiler.event("session", "Session.run", async () => { const o = this.normalizeAndValidateInputs(l), t = await this._executionPlan.execute(this.sessionHandler, o); return this.createOutput(t) }) } normalizeAndValidateInputs(l) { const o = this._model.graph.getInputNames(); if (Array.isArray(l)) { if (l.length !== o.length) throw new Error(`incorrect input array length: expected ${o.length} but got ${l.length}`) } else { if (l.size !== o.length) throw new Error(`incorrect input map size: expected ${o.length} but got ${l.size}`); const t = new Array(l.size); let e = 0; for (let r = 0; r < o.length; ++r) { const i = l.get(o[r]); if (!i) throw new Error(`missing input tensor for: '${name}'`); t[e++] = i } l = t } if (this.context.graphInputTypes && this.context.graphInputTypes.length !== 0 && this.context.graphInputDims && this.context.graphInputDims.length !== 0) this.validateInputTensorDims(this.context.graphInputDims, l, !1); else { const t = this._model.graph.getInputIndices(), e = this._model.graph.getValues(), r = new Array(t.length); for (let i = 0; i < t.length; ++i) { const d = e[t[i]]; r[i] = d.type.shape.dims, this.context.graphInputTypes.push(d.type.tensorType), this.context.graphInputDims.push(l[i].dims) } this.validateInputTensorDims(r, l, !0) } return this.validateInputTensorTypes(this.context.graphInputTypes, l), l } validateInputTensorTypes(l, o) { for (let t = 0; t < o.length; t++) { const e = l[t], r = o[t].type; if (e !== r) throw new Error(`input tensor[${t}] check failed: expected type '${e}' but got ${r}`) } } validateInputTensorDims(l, o, t) { for (let e = 0; e < o.length; e++) { const r = l[e], i = o[e].dims; if (!this.compareTensorDims(r, i, t)) throw new Error(`input tensor[${e}] check failed: expected shape '[${r.join(",")}]' but got [${i.join(",")}]`) } } compareTensorDims(l, o, t) { if (l.length !== o.length) return !1; for (let e = 0; e < l.length; ++e) if (l[e] !== o[e] && (!t || l[e] !== 0)) return !1; return !0 } 
createOutput(l) { const o = this._model.graph.getOutputNames(); if (l.length !== o.length) throw new Error("expected number of outputs do not match number of generated outputs"); const t = new Map; for (let e = 0; e < o.length; ++e) t.set(o[e], l[e]); return t } initializeOps(l) { const o = l.getNodes(); this._ops = new Array(o.length); for (let t = 0; t < o.length; t++) this._ops[t] = this.sessionHandler.resolve(o[t], this._model.opsets, l) } } }, 9162: function(b, n, a) { var u = this && this.__importDefault || function(d) { return d && d.__esModule ? d : { default: d } }; Object.defineProperty(n, "__esModule", { value: !0 }), n.Tensor = void 0; const c = a(3442), f = u(a(3720)), s = a(1446), h = a(9395), p = a(2517); var l = h.onnxruntime.experimental.fbs; class o { get data() { if (this.cache === void 0) { const g = this.dataProvider(this.dataId); if (g.length !== this.size) throw new Error("Length of data provided by the Data Provider is inconsistent with the dims of this Tensor."); this.cache = g } return this.cache } get stringData() { if (this.type !== "string") throw new TypeError("data type is not string"); return this.data } get integerData() { switch (this.type) { case "uint8": case "int8": case "uint16": case "int16": case "int32": case "uint32": case "bool": return this.data; default: throw new TypeError("data type is not integer (uint8, int8, uint16, int16, int32, uint32, bool)") } } get floatData() { switch (this.type) { case "float32": case "float64": return this.data; default: throw new TypeError("data type is not float (float32, float64)") } } get numberData() { if (this.type !== "string") return this.data; throw new TypeError("type cannot be non-number (string)") } get(g) { return this.data[p.ShapeUtil.indicesToOffset(g, this.strides)] } set(g, m) { this.data[p.ShapeUtil.indicesToOffset(g, this.strides)] = m } async getData() { return this.cache === void 0 && (this.cache = await this.asyncDataProvider(this.dataId)), this.cache } get strides() { return this._strides || (this._strides = p.ShapeUtil.computeStrides(this.dims)), this._strides } constructor(g, m, _, y, T, w = c.Guid.create()) { this.dims = g, this.type = m, this.dataProvider = _, this.asyncDataProvider = y, this.cache = T, this.dataId = w, this.size = p.ShapeUtil.validateDimsAndCalcSize(g); const S = this.size, O = _ === void 0 && y === void 0 && T === void 0; if (T !== void 0 && T.length !== S) throw new RangeError("Input dims doesn't match data length."); if (m === "string") { if (!(T === void 0 || Array.isArray(T) && T.every(E => typeof E == "string"))) throw new TypeError("cache should be a string array"); O && (this.cache = new Array(S)) } else { if (T !== void 0) { const E = e(m); if (!(T instanceof E)) throw new TypeError(`cache should be type ${E.name}`) } if (O) { const E = new ArrayBuffer(S * function(v) { switch (v) { case "bool": case "int8": case "uint8": return 1; case "int16": case "uint16": return 2; case "int32": case "uint32": case "float32": return 4; case "float64": return 8; default: throw new Error(`cannot calculate sizeof() on type ${v}`) } }(m)); this.cache = function(v, P) { return new(e(P))(v) }(E, m) } } } static fromProto(g) { if (!g) throw new Error("cannot construct Value from an empty tensor"); const m = p.ProtoUtil.tensorDataTypeFromProto(g.dataType), _ = p.ProtoUtil.tensorDimsFromProto(g.dims), y = new o(_, m); if (m === "string") g.stringData.forEach((T, w) => { y.data[w] = (0, p.decodeUtf8String)(T) }); else if (g.rawData && typeof g.rawData.byteLength == "number" && 
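// Tensor.fromProto (module 9162): string tensors are decoded with decodeUtf8String; when rawData is present each element is read through a DataView; otherwise the typed floatData/int32Data/int64Data/... fields are copied, with Long values range-checked and converted to plain numbers.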
g.rawData.byteLength > 0) { const T = y.data, w = new DataView(g.rawData.buffer, g.rawData.byteOffset, g.rawData.byteLength), S = t(g.dataType), O = g.rawData.byteLength / S; if (g.rawData.byteLength % S != 0) throw new Error("invalid buffer length"); if (T.length !== O) throw new Error("buffer length mismatch"); for (let E = 0; E < O; E++) { const v = i(w, g.dataType, E * S); T[E] = v } } else { let T; switch (g.dataType) { case s.onnx.TensorProto.DataType.FLOAT: T = g.floatData; break; case s.onnx.TensorProto.DataType.INT32: case s.onnx.TensorProto.DataType.INT16: case s.onnx.TensorProto.DataType.UINT16: case s.onnx.TensorProto.DataType.INT8: case s.onnx.TensorProto.DataType.UINT8: case s.onnx.TensorProto.DataType.BOOL: T = g.int32Data; break; case s.onnx.TensorProto.DataType.INT64: T = g.int64Data; break; case s.onnx.TensorProto.DataType.DOUBLE: T = g.doubleData; break; case s.onnx.TensorProto.DataType.UINT32: case s.onnx.TensorProto.DataType.UINT64: T = g.uint64Data; break; default: throw new Error("unspecific error") } if (T == null) throw new Error("failed to populate data from a tensorproto value"); const w = y.data; if (w.length !== T.length) throw new Error("array length mismatch"); for (let S = 0; S < T.length; S++) { const O = T[S]; f.default.isLong(O) ? w[S] = r(O, g.dataType) : w[S] = O } } return y } static fromData(g, m, _) { return new o(m, _, void 0, void 0, g) } static fromOrtTensor(g) { if (!g) throw new Error("cannot construct Value from an empty tensor"); const m = p.ProtoUtil.tensorDimsFromORTFormat(g), _ = p.ProtoUtil.tensorDataTypeFromProto(g.dataType()), y = new o(m, _); if (_ === "string") for (let T = 0; T < g.stringDataLength(); T++) y.data[T] = g.stringData(T); else if (g.rawDataArray() && typeof g.rawDataLength() == "number" && g.rawDataLength() > 0) { const T = y.data, w = new DataView(g.rawDataArray().buffer, g.rawDataArray().byteOffset, g.rawDataLength()), S = t(g.dataType()), O = g.rawDataLength() / S; if (g.rawDataLength() % S != 0) throw new Error("invalid buffer length"); if (T.length !== O) throw new Error("buffer length mismatch"); for (let E = 0; E < O; E++) { const v = i(w, g.dataType(), E * S); T[E] = v } } return y } } function t(d) { switch (d) { case s.onnx.TensorProto.DataType.UINT8: case s.onnx.TensorProto.DataType.INT8: case s.onnx.TensorProto.DataType.BOOL: return 1; case s.onnx.TensorProto.DataType.UINT16: case s.onnx.TensorProto.DataType.INT16: return 2; case s.onnx.TensorProto.DataType.FLOAT: case s.onnx.TensorProto.DataType.INT32: case s.onnx.TensorProto.DataType.UINT32: return 4; case s.onnx.TensorProto.DataType.INT64: case s.onnx.TensorProto.DataType.DOUBLE: case s.onnx.TensorProto.DataType.UINT64: return 8; default: throw new Error(`cannot calculate sizeof() on type ${s.onnx.TensorProto.DataType[d]}`) } } function e(d) { switch (d) { case "bool": case "uint8": return Uint8Array; case "int8": return Int8Array; case "int16": return Int16Array; case "uint16": return Uint16Array; case "int32": return Int32Array; case "uint32": return Uint32Array; case "float32": return Float32Array; case "float64": return Float64Array; default: throw new Error("unspecified error") } } function r(d, g) { if (g === s.onnx.TensorProto.DataType.INT64 || g === l.TensorDataType.INT64) { if (d.greaterThanOrEqual(2147483648) || d.lessThan(-2147483648)) throw new TypeError("int64 is not supported") } else { if (g !== s.onnx.TensorProto.DataType.UINT32 && g !== l.TensorDataType.UINT32 && g !== s.onnx.TensorProto.DataType.UINT64 && g !== l.TensorDataType.UINT64) 
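// Helper functions t/e/r/i (minified names): per-element byte size for each TensorProto data type, the matching TypedArray constructor, range-checked Long-to-number narrowing (int64 outside int32 range and uint64 outside uint32 range are rejected), and little-endian DataView element reads.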
throw new TypeError(`not a LONG type: ${s.onnx.TensorProto.DataType[g]}`); if (d.greaterThanOrEqual(4294967296) || d.lessThan(0)) throw new TypeError("uint64 is not supported") } return d.toNumber() } function i(d, g, m) { switch (g) { case s.onnx.TensorProto.DataType.BOOL: case s.onnx.TensorProto.DataType.UINT8: return d.getUint8(m); case s.onnx.TensorProto.DataType.INT8: return d.getInt8(m); case s.onnx.TensorProto.DataType.UINT16: return d.getUint16(m, !0); case s.onnx.TensorProto.DataType.INT16: return d.getInt16(m, !0); case s.onnx.TensorProto.DataType.FLOAT: return d.getFloat32(m, !0); case s.onnx.TensorProto.DataType.INT32: return d.getInt32(m, !0); case s.onnx.TensorProto.DataType.UINT32: return d.getUint32(m, !0); case s.onnx.TensorProto.DataType.INT64: return r(f.default.fromBits(d.getUint32(m, !0), d.getUint32(m + 4, !0), !1), g); case s.onnx.TensorProto.DataType.DOUBLE: return d.getFloat64(m, !0); case s.onnx.TensorProto.DataType.UINT64: return r(f.default.fromBits(d.getUint32(m, !0), d.getUint32(m + 4, !0), !0), g); default: throw new Error(`cannot read from DataView for type ${s.onnx.TensorProto.DataType[g]}`) } } n.Tensor = o }, 2517: function(b, n, a) { var u = this && this.__importDefault || function(g) { return g && g.__esModule ? g : { default: g } }; Object.defineProperty(n, "__esModule", { value: !0 }), n.decodeUtf8String = n.MAX_CLIP = n.MIN_CLIP = n.PoolConvUtil = n.ReduceUtil = n.SplitUtil = n.MathUtil = n.ShapeUtil = n.LongUtil = n.ProtoUtil = n.GemmUtil = n.arrayCopyHelper = n.BroadcastUtil = n.MatMulUtil = n.ArrayUtil = n.assert = n.checkInputsShape = void 0; const c = a(5686), f = u(a(3720)), s = a(1446), h = a(9162); n.checkInputsShape = function(g, ...m) { if (!g || g.length !== m.length) return !1; for (let _ = 0; _ < g.length; _++) if (!g[_].dims || g[_].dims.length !== m[_]) return !1; return !0 }, n.assert = function(g, m) { if (!g) throw new Error(typeof m == "string" ? m : m()) }, n.ArrayUtil = class { static arraysEqual(g, m) { if (g.length !== m.length) return !1; for (let _ = 0; _ < g.length; _++) if (g[_] !== m[_]) return !1; return !0 } }; class p { static preprocessInputShapes(m, _) { return [m.length === 1 ? [1, m[0]] : m, _.length === 1 ? [_[0], 1] : _] } static postprocessOutputShape(m, _, y) { _ === 1 && m.splice(m.length - 2, 1), y === 1 && m.pop() } static calcMatMulShape(m, _) { return m[1] !== _[0] ? void 0 : [m[0], _[1]] } } n.MatMulUtil = p; class l { static calcShape(m, _, y = !1) { const T = m.length, w = _.length; if (T === 0) return _; if (w === 0) return m; const S = Math.max(m.length, _.length), O = new Array(S); if (y) { if (T < 2 || w < 2) return; const E = p.calcMatMulShape([m[T - 2], m[T - 1]], [_[w - 2], _[w - 1]]); if (E === void 0) return; [O[S - 2], O[S - 1]] = E } for (let E = y ? 3 : 1; E <= S; E++) { const v = T - E < 0 ? 1 : m[T - E], P = w - E < 0 ? 1 : _[w - E]; if (v !== P && v > 1 && P > 1) return; O[S - E] = Math.max(v, P) } return O } static index(m, _) { const y = new Array(_.length); return l.fillIndex(m, _, y), y } static fillIndex(m, _, y) { const T = m.length - _.length; for (let w = 0; w < _.length; w++) y[w] = m[T + w] % _[w] } static calc(m, _, y, T, w) { const S = l.calcShape(m.dims, _.dims); if (S) { if (T && !e.areEqual(S, m.dims)) return; const O = e.size(S), E = T ? 
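// BroadcastUtil.calc: applies a binary op elementwise under NumPy-style broadcasting; if the in-place flag is set the broadcast shape must equal the first operand's shape, otherwise a new Tensor of the broadcast shape is allocated.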
m : new h.Tensor(S, w || m.type); if (S.length === 0) E.set([], y(m.get([]), _.get([]))); else { const v = new Array(S.length), P = new Array(m.dims.length), L = new Array(_.dims.length); let V, R = 0, k = 0, Y = !1, C = !1; m.dims.length === 0 && (R = m.get([]), Y = !0), _.dims.length === 0 && (k = _.get([]), C = !0); for (let $ = 0; $ < O; $++) { V = $; for (let X = S.length - 1; X >= 0; X--) v[X] = V % S[X], V = Math.floor(V / S[X]); Y || (l.fillIndex(v, m.dims, P), R = m.get(P)), C || (l.fillIndex(v, _.dims, L), k = _.get(L)), E.set(v, y(R, k)) } } return E } } static isValidBroadcast(m, _) { const y = m.length, T = _.length; if (y > T) return !1; for (let w = 1; w <= y; w++) if (m[y - w] !== 1 && m[y - w] !== _[T - w]) return !1; return !0 } static getBroadcastDims(m, _) { const y = m.length, T = []; for (let w = 0; w < y; w++) { const S = y - 1 - w, O = m[S] || 1; (_[_.length - 1 - w] || 1) > 1 && O === 1 && T.unshift(S) } return T } } n.BroadcastUtil = l, n.arrayCopyHelper = function(g, m, _, y, T) { if (y < 0 || y >= m.length) throw new Error("sourceIndex out of bounds"); if (_ < 0 || _ >= g.length) throw new Error("targetIndex out of bounds"); if (y + T > m.length) throw new Error("source indices to be copied are outside bounds"); if (_ + T > g.length) throw new Error("target array is too small to hold result"); for (let w = 0; w < T; w++) g[_ + w] = m[y + w] }, n.GemmUtil = class { static getShapeOfGemmResult(g, m, _, y, T) { if (g.length !== 2 || _.length !== 2) throw new Error("shape need to be of size 2"); let w, S, O; m ? (w = g[1], S = g[0]) : (w = g[0], S = g[1]); let E = -1; if (y ? (O = _[0], E = 1) : (O = _[1], E = 0), _[E] !== S) throw new Error("dimension mismatch"); if (w <= 0 || O <= 0 || S <= 0) throw new Error("invalid shape specified"); if (T && !l.isValidBroadcast(T, [w, O])) throw new Error("gemm: invalid bias shape for broadcast"); return [w, O, S] } }; class o { static tensorDataTypeFromProto(m) { switch (m) { case s.onnx.TensorProto.DataType.INT8: return "int8"; case s.onnx.TensorProto.DataType.UINT8: return "uint8"; case s.onnx.TensorProto.DataType.BOOL: return "bool"; case s.onnx.TensorProto.DataType.INT16: return "int16"; case s.onnx.TensorProto.DataType.UINT16: return "uint16"; case s.onnx.TensorProto.DataType.INT32: return "int32"; case s.onnx.TensorProto.DataType.UINT32: return "uint32"; case s.onnx.TensorProto.DataType.FLOAT: return "float32"; case s.onnx.TensorProto.DataType.DOUBLE: return "float64"; case s.onnx.TensorProto.DataType.STRING: return "string"; case s.onnx.TensorProto.DataType.INT64: return "int32"; case s.onnx.TensorProto.DataType.UINT64: return "uint32"; default: throw new Error(`unsupported data type: ${s.onnx.TensorProto.DataType[m]}`) } } static tensorDataTypeStringToEnum(m) { switch (m) { case "int8": return s.onnx.TensorProto.DataType.INT8; case "uint8": return s.onnx.TensorProto.DataType.UINT8; case "bool": return s.onnx.TensorProto.DataType.BOOL; case "int16": return s.onnx.TensorProto.DataType.INT16; case "uint16": return s.onnx.TensorProto.DataType.UINT16; case "int32": return s.onnx.TensorProto.DataType.INT32; case "uint32": return s.onnx.TensorProto.DataType.UINT32; case "float32": return s.onnx.TensorProto.DataType.FLOAT; case "float64": return s.onnx.TensorProto.DataType.DOUBLE; case "string": return s.onnx.TensorProto.DataType.STRING; case "int64": return s.onnx.TensorProto.DataType.INT64; case "uint64": return s.onnx.TensorProto.DataType.UINT64; default: throw new Error(`unsupported data type: ${m}`) } } static 
tensorDimsFromProto(m) { return m.map(_ => f.default.isLong(_) ? _.toNumber() : _) } static tensorValueTypeFromProto(m) { return { tensorType: o.tensorDataTypeFromProto(m.elemType), shape: { dims: o.tensorDimsFromProto(m.shape.dim.map(_ => _.dimValue)) } } } static tensorDimsFromORTFormat(m) { const _ = []; for (let y = 0; y < m.dimsLength(); y++) _.push(t.longToNumber(m.dims(y))); return _ } static tensorAttributesFromORTFormat(m) { const _ = []; for (let y = 0; y < m.attributesLength(); y++) _.push(m.attributes(y)); return _ } } n.ProtoUtil = o; class t { static longToNumber(m, _) { return f.default.isLong(m) ? m.toNumber() : m instanceof c.flatbuffers.Long ? f.default.fromValue({ low: m.low, high: m.high, unsigned: _ != null && _ }).toNumber() : m } static isLong(m) { return f.default.isLong(m) || m instanceof c.flatbuffers.Long } } n.LongUtil = t; class e { static size(m) { return e.getSizeFromDimensionRange(m, 0, m.length) } static sizeFromDimension(m, _) { if (_ < 0 || _ > m.length) throw new Error(`invalid dimension of ${_} for sizeFromDimension as Tensor has ${m.length} dimensions.`); return e.getSizeFromDimensionRange(m, _, m.length) } static sizeToDimension(m, _) { if (_ < 0 || _ > m.length) throw new Error(`invalid dimension of ${_} for sizeToDimension as Tensor has ${m.length} dimensions.`); return e.getSizeFromDimensionRange(m, 0, _) } static getSizeFromDimensionRange(m, _, y) { let T = 1; for (let w = _; w < y; w++) { if (m[w] <= 0) throw new Error("cannot get valid size from specified dimension range. Most likely the range contains 0 or negative values in them."); T *= m[w] } return T } static computeStrides(m) { const _ = m.length; if (_ === 0) return []; if (_ === 1) return [1]; const y = new Array(_); y[_ - 1] = 1, y[_ - 2] = m[_ - 1]; for (let T = _ - 3; T >= 0; --T) y[T] = y[T + 1] * m[T + 1]; return y } static transpose(m) { return m.slice().reverse() } static indicesToOffset(m, _, y) { y === void 0 && (y = m.length); let T = 0; for (let w = 0; w < y; ++w) T += _[w] * m[w]; return T } static offsetToIndices(m, _) { const y = _.length; if (y === 0) return []; if (y === 1) return [m * _[0]]; const T = new Array(_.length); for (let w = 0; w < T.length - 1; ++w) T[w] = Math.floor(m / _[w]), m -= T[w] * _[w]; return T[T.length - 1] = m, T } static normalizeAxis(m, _) { if (m < -_ && m >= _) throw new Error("unsupported axis for this operation."); return m < 0 ? 
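// ShapeUtil: size/stride/offset arithmetic shared by the CPU operators — computeStrides, indicesToOffset/offsetToIndices, axis normalization, and reshape/squeeze/unsqueeze shape inference.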
m + _ : m } static normalizeAxes(m, _) { return m.map(y => this.normalizeAxis(y, _)) } static incrementIndex(m, _, y) { if (_.length === 0 || m.length === 0) throw new Error("Index incrementing unsupported for scalar Tensor"); if (y === void 0) y = _.length; else if (y <= 0 || y > _.length) throw new Error("Incorrect axis to increment on"); for (let T = y - 1; T >= 0 && (m[T]++, !(m[T] < _[T])); --T) m[T] = 0 } static calculateReshapedDims(m, _) { if (_.length === 0) { if (m.length === 0 || e.size(m) === 1) return []; throw new Error("cannot reshape to a scalar Tensor") } const y = _.length, T = new Array(y); let w = -1, S = 1; for (let E = 0; E < y; E++) { if (_[E] < -1) throw new Error("a dimension in shape hints cannot be less than -1"); if (_[E] === -1) { if (w !== -1) throw new Error("at most one dimension in shape hints can be -1"); w = E } else { if (_[E] === 0) { if (E >= m.length) throw new Error("the dimension with value zero exceeds the dimension size of the input tensor"); T[E] = m[E] } else T[E] = _[E]; S *= T[E] } } const O = e.size(m); if (w !== -1) { if (O % S != 0) throw new Error(`the input tensor cannot be reshaped to the requested shape. Input shape: [${m}] Output shape: [${_}]`); T[w] = O / S } else if (S !== O) throw new Error("reshapedDims and originalDims don't have matching sizes"); return T } static sortBasedOnPerm(m, _) { return _ ? _.map(y => m[y]) : m.slice().reverse() } static padShape(m, _) { const y = m.length; return m.map((T, w) => T + _[w] + _[w + y]) } static areEqual(m, _) { return m.length === _.length && m.every((y, T) => y === _[T]) } static validateDimsAndCalcSize(m) { if (m.length > 6) throw new TypeError("Only rank 0 to 6 is supported for tensor shape."); let _ = 1; for (const y of m) { if (!Number.isInteger(y)) throw new TypeError(`Invalid shape: ${y} is not an integer`); if (y < 0 || y > 2147483647) throw new TypeError(`Invalid shape: length ${y} is not allowed`); _ *= y } return _ } static flattenShape(m, _) { _ < 0 && (_ += m.length); const y = m.reduce((w, S) => w * S, 1), T = m.slice(_).reduce((w, S) => w * S, 1); return [y / T, T] } static squeezeShape(m, _) { const y = new Array; _ = e.normalizeAxes(_, m.length); for (let T = 0; T < m.length; T++) { const w = _.indexOf(T) >= 0; if (w && m[T] !== 1) throw new Error("squeeze an axis of size different than 1"); (_.length === 0 && m[T] > 1 || _.length > 0 && !w) && y.push(m[T]) } return y } static unsqueezeShape(m, _) { const y = new Array(m.length + _.length); y.fill(0); for (let w = 0; w < _.length; w++) { const S = e.normalizeAxis(_[w], y.length); if (S >= y.length) throw new Error("'axes' has an out of range axis"); if (y[S] !== 0) throw new Error("'axes' has a duplicate axis"); y[S] = 1 } let T = 0; for (let w = 0; w < y.length; w++) y[w] === 0 && (y[w] = m[T++]); if (T !== m.length) throw new Error("the unsqueezed dimension could not be established"); return y } } n.ShapeUtil = e, n.MathUtil = class { static sqr(g, m, _, y, T) { if (y < 0 || y >= m.length) throw new Error("sourceIndex out of bounds"); if (_ < 0 || _ >= g.length) throw new Error("targetIndex out of bounds"); if (y + T > m.length) throw new Error("source indices to be copied are outside bounds"); if (_ + T > g.length) throw new Error("target array is too small to hold result"); for (let w = 0; w < T; w++) g[_ + w] += Math.pow(m[y + w], 2) } static axpy(g, m, _, y, T, w) { if (y < 0 || y >= m.length) throw new Error("sourceIndex out of bounds"); if (_ < 0 || _ >= g.length) throw new Error("targetIndex out of bounds"); if (y 
+ T > m.length) throw new Error("source indices to be copied are outside bounds"); if (_ + T > g.length) throw new Error("target array is too small to hold result"); for (let S = 0; S < T; S++) g[_ + S] += w * m[y + S] } static powx(g, m, _, y, T, w) { if (y < 0 || y >= m.length) throw new Error("sourceIndex out of bounds"); if (_ < 0 || _ >= g.length) throw new Error("targetIndex out of bounds"); if (y + T > m.length) throw new Error("source indices to be copied are outside bounds"); if (_ + T > g.length) throw new Error("target array is too small to hold result"); for (let S = 0; S < T; S++) g[_ + S] = Math.pow(m[y + S], w) } static mul(g, m, _, y, T) { if (y < 0 || y >= m.length) throw new Error("sourceIndex out of bounds"); if (_ < 0 || _ >= g.length) throw new Error("targetIndex out of bounds"); if (y + T > m.length) throw new Error("source indices to be copied are outside bounds"); if (_ + T > g.length) throw new Error("target array is too small to hold result"); for (let w = 0; w < T; w++) g[_ + w] = m[y + w] * g[_ + w] } }; class r { static splitShape(m, _, y, T) { if (y.length === 0) { if (!T) throw new Error("need to know number of outputs when the 'split' attribute is not specified"); r.determineSplit(m[_], T, y) } const w = [], S = [0]; for (let O = 0; O < y.length; ++O) { O !== 0 && S.push(S[O - 1] + y[O - 1]); const E = m.slice(); E[_] = y[O], w.push(E) } return [w, S] } static determineSplit(m, _, y) { if (m % _ != 0) throw new Error("cannot split tensor to equal sized parts"); for (let T = 0; T < _; ++T) y.push(m / _) } } n.SplitUtil = r; class i { static calcReduce(m, _, y, T, w) { const S = m.dims.slice(0); _.length === 0 && S.forEach((R, k) => _.push(k)); const O = i.calcReduceShape(S, _, !0), E = e.size(O), v = new h.Tensor(O, m.type), P = e.computeStrides(O), L = e.computeStrides(S), V = new Array(S.length); for (let R = 0; R < E; R++) { const k = e.offsetToIndices(R, P); l.fillIndex(k, S, V), v.set(k, i.calcReduceByAxis(m.numberData, _, S, 0, e.indicesToOffset(V, L), T, w)) } return y ? v : new h.Tensor(i.calcReduceShape(S, _, y), v.type, void 0, void 0, v.data, v.dataId) } static calcReduceByAxis(m, _, y, T, w, S, O) { let E = 0; if (T >= _.length) return S(m[w]); const v = _[T], P = v >= y.length ? 1 : e.size(y.slice(v + 1)); for (let L = 0; L < y[v]; L++) E = L === 0 ? i.calcReduceByAxis(m, _, y, T + 1, w, S, O) : O(E, i.calcReduceByAxis(m, _, y, T + 1, w, S, O)), w += P; return E } static calcReduceShape(m, _, y) { const T = m.slice(); for (let w = 0; w < _.length; w++) T[_[w]] = y ? 1 : 0; return T.filter(w => w !== 0) } } n.ReduceUtil = i; class d { static adjustPoolAttributes(m, _, y, T, w, S) { if (!m && y.length !== _.length - 2) throw new Error("length of specified kernel shapes should be 2 less than length of input dimensions"); if (m) for (let O = 0; O < _.length - 2; O++) O >= y.length ? 
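// PoolConvUtil: normalizes kernel/stride/dilation/pad attributes and computes Conv/Pool output shapes, including VALID and SAME_UPPER/SAME_LOWER auto-padding.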
y.push(_[O + 2]) : y[O] = _[O + 2]; for (let O = 0; O < y.length; O++) if (O < T.length) { if (T[O] < 0) throw new Error("strides should be greater than or equal to 1") } else T.push(1); for (let O = 0; O < y.length; O++) if (O < w.length) { if (w[O] < 0) throw new Error("dilations should be greater than or equal to 1") } else w.push(1); for (let O = 0; O < 2 * y.length; O++) if (O < S.length) { if (S[O] < 0) throw new Error("pad should be greater than or equal to 1") } else S.push(0); for (let O = 0; O < y.length; O++) { if (y[O] <= 0) throw new Error("kernel shapes need to be greater than 0"); if (S[O] >= y[O] || S[O + y.length] >= y[O]) throw new Error("pads should be smaller than kernel") } } static adjustPadsBasedOnAutoPad(m, _, y, T, w, S) { if (S) { if (w.length !== 2 * (m.length - 2)) throw new Error("length of pads should be twice the length of data dimensions"); if (_.length !== m.length - 2) throw new Error("length of strides should be the length of data dimensions"); if (T.length !== m.length - 2) throw new Error("length of kernel shapes should be the length of data dimensions"); for (let O = 0; O < m.length - 2; O++) d.adjustPadAndReturnShape(m[O + 2], _[O], y[O], T[O], w, O, O + m.length - 2, S) } } static computePoolOutputShape(m, _, y, T, w, S, O) { if (_.length <= 0) throw new Error("input shape must be of size greater than 0"); const E = [_[0], _[1]]; return d.computeShapeHelper(m, _, E, y, T, w, S, O), E } static computeConvOutputShape(m, _, y, T, w, S, O) { if (m.length <= 0 || _.length <= 0) throw new Error("invalid input tensor dims or invalid filter tensor dims"); const E = [m[0], _[0]]; return d.computeShapeHelper(!1, m, E, y, T, w, S, O), E } static computeShapeHelper(m, _, y, T, w, S, O, E) { if (m) for (let v = 0; v < _.length - 2; v++) y.push(1); else for (let v = 0; v < _.length - 2; v++) y.push(d.adjustPadAndReturnShape(_[v + 2], T[v], w[v], S[v], O, v, v + _.length - 2, E)) } static adjustPadAndReturnShape(m, _, y, T, w, S, O, E) { const v = y * (T - 1) + 1; if (!E || E === "NOTSET") return Math.floor((m + w[S] + w[O] - v) / _ + 1); switch (E) { case "VALID": return w[S] = 0, w[O] = 0, Math.floor((m - v) / _ + 1); case "SAME_LOWER": case "SAME_UPPER": if (y !== 1) throw new Error("Dilation not supported for SAME_UPPER or SAME_LOWER"); { const P = ((m + _ - 1) / _ - 1) * _ + T - m; return w[S] = Math.floor(E === "SAME_LOWER" ? (P + 1) / 2 : P / 2), w[O] = P - w[S], Math.floor((m + P - T) / _ + 1) } default: throw new Error("Unsupported AutoPad type") } } } n.PoolConvUtil = d, n.MIN_CLIP = -34028234663852886e22, n.MAX_CLIP = 34028234663852886e22, n.decodeUtf8String = function(g) { return new TextDecoder().decode(g) } }, 7967: (b, n) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.iterateExtraOptions = void 0, n.iterateExtraOptions = (a, u, c, f) => { if (typeof a == "object" && a !== null) { if (c.has(a)) throw new Error("Circular reference in options"); c.add(a) } Object.entries(a).forEach(([s, h]) => { const p = u ? u + s : s; if (typeof h == "object")(0, n.iterateExtraOptions)(h, p + ".", c, f); else if (typeof h == "string" || typeof h == "number") f(p, h.toString()); else { if (typeof h != "boolean") throw new Error("Can't handle extra config type: " + typeof h); f(p, h ? "1" : "0") } }) } }, 2157: function(b, n, a) { var u, c = this && this.__createBinding || (Object.create ? function(P, L, V, R) { R === void 0 && (R = V); var k = Object.getOwnPropertyDescriptor(L, V); k && !("get" in k ? 
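// Module 2157 (wasm proxy): when env.wasm.proxy is enabled in a browser, init/create/run/release calls are posted to a dedicated worker and resolved from queued (resolve, reject) pairs; otherwise the wasm implementation in module 349 is invoked directly.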
!L.__esModule : k.writable || k.configurable) || (k = { enumerable: !0, get: function() { return L[V] } }), Object.defineProperty(P, R, k) } : function(P, L, V, R) { R === void 0 && (R = V), P[R] = L[V] }), f = this && this.__setModuleDefault || (Object.create ? function(P, L) { Object.defineProperty(P, "default", { enumerable: !0, value: L }) } : function(P, L) { P.default = L }), s = this && this.__importStar || function(P) { if (P && P.__esModule) return P; var L = {}; if (P != null) for (var V in P) V !== "default" && Object.prototype.hasOwnProperty.call(P, V) && c(L, P, V); return f(L, P), L }; Object.defineProperty(n, "__esModule", { value: !0 }), n.endProfiling = n.run = n.releaseSession = n.createSession = n.createSessionFinalize = n.createSessionAllocate = n.initOrt = n.initWasm = void 0; const h = a(1670), p = s(a(349)), l = a(6361), o = () => !!h.env.wasm.proxy && typeof document < "u"; let t, e, r, i = !1, d = !1, g = !1; const m = [], _ = [], y = [], T = [], w = [], S = [], O = () => { if (i || !d || g || !t) throw new Error("worker not ready") }, E = P => { switch (P.data.type) { case "init-wasm": i = !1, P.data.err ? (g = !0, e[1](P.data.err)) : (d = !0, e[0]()); break; case "init-ort": P.data.err ? r[1](P.data.err) : r[0](); break; case "create_allocate": P.data.err ? m.shift()[1](P.data.err) : m.shift()[0](P.data.out); break; case "create_finalize": P.data.err ? _.shift()[1](P.data.err) : _.shift()[0](P.data.out); break; case "create": P.data.err ? y.shift()[1](P.data.err) : y.shift()[0](P.data.out); break; case "release": P.data.err ? T.shift()[1](P.data.err) : T.shift()[0](); break; case "run": P.data.err ? w.shift()[1](P.data.err) : w.shift()[0](P.data.out); break; case "end-profiling": P.data.err ? S.shift()[1](P.data.err) : S.shift()[0]() } }, v = typeof document < "u" ? (u = document?.currentScript) === null || u === void 0 ? void 0 : u.src : void 0; n.initWasm = async () => { if (o()) { if (d) return; if (i) throw new Error("multiple calls to 'initWasm()' detected."); if (g) throw new Error("previous call to 'initWasm()' failed."); return i = !0, h.env.wasm.wasmPaths === void 0 && v && v.indexOf("blob:") !== 0 && (h.env.wasm.wasmPaths = v.substr(0, +v.lastIndexOf("/") + 1)), new Promise((P, L) => { t?.terminate(), t = a(9710).Z(), t.onmessage = E, e = [P, L]; const V = { type: "init-wasm", in: h.env.wasm }; t.postMessage(V) }) } return (0, l.initializeWebAssembly)(h.env.wasm) }, n.initOrt = async (P, L) => { if (o()) return O(), new Promise((V, R) => { r = [V, R]; const k = { type: "init-ort", in: { numThreads: P, loggingLevel: L } }; t.postMessage(k) }); p.initOrt(P, L) }, n.createSessionAllocate = async P => o() ? (O(), new Promise((L, V) => { m.push([L, V]); const R = { type: "create_allocate", in: { model: P } }; t.postMessage(R, [P.buffer]) })) : p.createSessionAllocate(P), n.createSessionFinalize = async (P, L) => o() ? (O(), new Promise((V, R) => { _.push([V, R]); const k = { type: "create_finalize", in: { modeldata: P, options: L } }; t.postMessage(k) })) : p.createSessionFinalize(P, L), n.createSession = async (P, L) => o() ? (O(), new Promise((V, R) => { y.push([V, R]); const k = { type: "create", in: { model: P, options: L } }; t.postMessage(k, [P.buffer]) })) : p.createSession(P, L), n.releaseSession = async P => { if (o()) return O(), new Promise((L, V) => { T.push([L, V]); const R = { type: "release", in: P }; t.postMessage(R) }); p.releaseSession(P) }, n.run = async (P, L, V, R, k) => o() ? 
(O(), new Promise((Y, C) => { w.push([Y, C]); const $ = { type: "run", in: { sessionId: P, inputIndices: L, inputs: V, outputIndices: R, options: k } }; t.postMessage($, p.extractTransferableBuffers(V)) })) : p.run(P, L, V, R, k), n.endProfiling = async P => { if (o()) return O(), new Promise((L, V) => { S.push([L, V]); const R = { type: "end-profiling", in: P }; t.postMessage(R) }); p.endProfiling(P) } }, 586: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.setRunOptions = void 0; const u = a(7967), c = a(4983), f = a(6361); n.setRunOptions = s => { const h = (0, f.getInstance)(); let p = 0; const l = [], o = s || {}; try { if (s?.logSeverityLevel === void 0) o.logSeverityLevel = 2; else if (typeof s.logSeverityLevel != "number" || !Number.isInteger(s.logSeverityLevel) || s.logSeverityLevel < 0 || s.logSeverityLevel > 4) throw new Error(`log serverity level is not valid: ${s.logSeverityLevel}`); if (s?.logVerbosityLevel === void 0) o.logVerbosityLevel = 0; else if (typeof s.logVerbosityLevel != "number" || !Number.isInteger(s.logVerbosityLevel)) throw new Error(`log verbosity level is not valid: ${s.logVerbosityLevel}`); s?.terminate === void 0 && (o.terminate = !1); let t = 0; if (s?.tag !== void 0 && (t = (0, c.allocWasmString)(s.tag, l)), p = h._OrtCreateRunOptions(o.logSeverityLevel, o.logVerbosityLevel, !!o.terminate, t), p === 0) throw new Error("Can't create run options"); return s?.extra !== void 0 && (0, u.iterateExtraOptions)(s.extra, "", new WeakSet, (e, r) => { const i = (0, c.allocWasmString)(e, l), d = (0, c.allocWasmString)(r, l); if (h._OrtAddRunConfigEntry(p, i, d) !== 0) throw new Error(`Can't set a run config entry: ${e} - ${r}`) }), [p, l] } catch (t) { throw p !== 0 && h._OrtReleaseRunOptions(p), l.forEach(h._free), t } } }, 2306: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.OnnxruntimeWebAssemblySessionHandler = void 0; const u = a(2806), c = a(1670), f = a(2850), s = a(2157); let h; n.OnnxruntimeWebAssemblySessionHandler = class { async createSessionAllocate(p) { const l = await fetch(p), o = await l.arrayBuffer(); return (0, s.createSessionAllocate)(new Uint8Array(o)) } async loadModel(p, l) { if (h || (await (0, s.initOrt)(c.env.wasm.numThreads, (o => { switch (o) { case "verbose": return 0; case "info": return 1; case "warning": return 2; case "error": return 3; case "fatal": return 4; default: throw new Error(`unsupported logging level: ${o}`) } })(c.env.logLevel)), h = !0), typeof p == "string") if (typeof fetch > "u") { const o = await (0, f.promisify)(u.readFile)(p); [this.sessionId, this.inputNames, this.outputNames] = await (0, s.createSession)(o, l) } else { const o = await this.createSessionAllocate(p); [this.sessionId, this.inputNames, this.outputNames] = await (0, s.createSessionFinalize)(o, l) } else [this.sessionId, this.inputNames, this.outputNames] = await (0, s.createSession)(p, l) } async dispose() { return (0, s.releaseSession)(this.sessionId) } async run(p, l, o) { const t = [], e = []; Object.entries(p).forEach(g => { const m = g[0], _ = g[1], y = this.inputNames.indexOf(m); if (y === -1) throw new Error(`invalid input '${m}'`); t.push(_), e.push(y) }); const r = []; Object.entries(l).forEach(g => { const m = g[0], _ = this.outputNames.indexOf(m); if (_ === -1) throw new Error(`invalid output '${m}'`); r.push(_) }); const i = await (0, s.run)(this.sessionId, e, t.map(g => [g.type, g.dims, g.data]), r, o), d = {}; for (let g = 0; g < i.length; g++) d[this.outputNames[r[g]]] = new 
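/* Typical use of the bundled runtime goes through the public onnxruntime-web API rather than this
   handler directly. A minimal sketch, assuming the bundle re-exports the standard entry points
   (InferenceSession, Tensor, referred to here via an `ort` namespace) and that a file `model.onnx`
   with a float input named `input` of shape [1, 3, 224, 224] exists — both are assumptions, not
   part of this file:

     const session = await ort.InferenceSession.create('model.onnx', { executionProviders: ['wasm'] });
     const input = new ort.Tensor('float32', new Float32Array(1 * 3 * 224 * 224), [1, 3, 224, 224]);
     const results = await session.run({ input });   // outputs keyed by output name
     console.log(results);

   This session handler translates such named inputs/outputs into the integer indices the wasm core expects. */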
c.Tensor(i[g][0], i[g][2], i[g][1]); return d } startProfiling() {} endProfiling() { (0, s.endProfiling)(this.sessionId) } } }, 4919: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.setSessionOptions = void 0; const u = a(7967), c = a(4983), f = a(6361); n.setSessionOptions = s => { const h = (0, f.getInstance)(); let p = 0; const l = [], o = s || {}; (t => { t.extra || (t.extra = {}), t.extra.session || (t.extra.session = {}); const e = t.extra.session; e.use_ort_model_bytes_directly || (e.use_ort_model_bytes_directly = "1") })(o); try { s?.graphOptimizationLevel === void 0 && (o.graphOptimizationLevel = "all"); const t = (i => { switch (i) { case "disabled": return 0; case "basic": return 1; case "extended": return 2; case "all": return 99; default: throw new Error(`unsupported graph optimization level: ${i}`) } })(o.graphOptimizationLevel); s?.enableCpuMemArena === void 0 && (o.enableCpuMemArena = !0), s?.enableMemPattern === void 0 && (o.enableMemPattern = !0), s?.executionMode === void 0 && (o.executionMode = "sequential"); const e = (i => { switch (i) { case "sequential": return 0; case "parallel": return 1; default: throw new Error(`unsupported execution mode: ${i}`) } })(o.executionMode); let r = 0; if (s?.logId !== void 0 && (r = (0, c.allocWasmString)(s.logId, l)), s?.logSeverityLevel === void 0) o.logSeverityLevel = 2; else if (typeof s.logSeverityLevel != "number" || !Number.isInteger(s.logSeverityLevel) || s.logSeverityLevel < 0 || s.logSeverityLevel > 4) throw new Error(`log serverity level is not valid: ${s.logSeverityLevel}`); if (s?.logVerbosityLevel === void 0) o.logVerbosityLevel = 0; else if (typeof s.logVerbosityLevel != "number" || !Number.isInteger(s.logVerbosityLevel)) throw new Error(`log verbosity level is not valid: ${s.logVerbosityLevel}`); if (s?.enableProfiling === void 0 && (o.enableProfiling = !1), p = h._OrtCreateSessionOptions(t, !!o.enableCpuMemArena, !!o.enableMemPattern, e, !!o.enableProfiling, 0, r, o.logSeverityLevel, o.logVerbosityLevel), p === 0) throw new Error("Can't create session options"); return s?.executionProviders && ((i, d, g) => { for (const m of d) { let _ = typeof m == "string" ? 
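// setSessionOptions (module 4919): forces use_ort_model_bytes_directly, maps graphOptimizationLevel ('disabled'|'basic'|'extended'|'all'), executionMode ('sequential'|'parallel') and log levels onto _OrtCreateSessionOptions, and appends execution providers ('wasm'/'cpu' are built-in, 'xnnpack' is accepted, anything else throws).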
m : m.name; switch (_) { case "xnnpack": _ = "XNNPACK"; break; case "wasm": case "cpu": continue; default: throw new Error(`not supported EP: ${_}`) } const y = (0, c.allocWasmString)(_, g); if ((0, f.getInstance)()._OrtAppendExecutionProvider(i, y) !== 0) throw new Error(`Can't append execution provider: ${_}`) } })(p, s.executionProviders, l), s?.extra !== void 0 && (0, u.iterateExtraOptions)(s.extra, "", new WeakSet, (i, d) => { const g = (0, c.allocWasmString)(i, l), m = (0, c.allocWasmString)(d, l); if (h._OrtAddSessionConfigEntry(p, g, m) !== 0) throw new Error(`Can't set a session config entry: ${i} - ${d}`) }), [p, l] } catch (t) { throw p !== 0 && h._OrtReleaseSessionOptions(p), l.forEach(h._free), t } } }, 4983: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.allocWasmString = void 0; const u = a(6361); n.allocWasmString = (c, f) => { const s = (0, u.getInstance)(), h = s.lengthBytesUTF8(c) + 1, p = s._malloc(h); return s.stringToUTF8(c, p, h), f.push(p), p } }, 349: (b, n, a) => { Object.defineProperty(n, "__esModule", { value: !0 }), n.extractTransferableBuffers = n.endProfiling = n.run = n.releaseSession = n.createSession = n.createSessionFinalize = n.createSessionAllocate = n.initOrt = void 0; const u = a(586), c = a(4919), f = a(4983), s = a(6361); n.initOrt = (t, e) => { const r = (0, s.getInstance)()._OrtInit(t, e); if (r !== 0) throw new Error(`Can't initialize onnxruntime. error code = ${r}`) }; const h = new Map; n.createSessionAllocate = t => { const e = (0, s.getInstance)(), r = e._malloc(t.byteLength); return e.HEAPU8.set(t, r), [r, t.byteLength] }, n.createSessionFinalize = (t, e) => { const r = (0, s.getInstance)(); let i = 0, d = 0, g = []; try { if ([d, g] = (0, c.setSessionOptions)(e), i = r._OrtCreateSession(t[0], t[1], d), i === 0) throw new Error("Can't create a session") } finally { r._free(t[0]), r._OrtReleaseSessionOptions(d), g.forEach(r._free) } const m = r._OrtGetInputCount(i), _ = r._OrtGetOutputCount(i), y = [], T = [], w = [], S = []; for (let O = 0; O < m; O++) { const E = r._OrtGetInputName(i, O); if (E === 0) throw new Error("Can't get an input name"); T.push(E), y.push(r.UTF8ToString(E)) } for (let O = 0; O < _; O++) { const E = r._OrtGetOutputName(i, O); if (E === 0) throw new Error("Can't get an output name"); S.push(E), w.push(r.UTF8ToString(E)) } return h.set(i, [i, T, S]), [i, y, w] }, n.createSession = (t, e) => { const r = (0, n.createSessionAllocate)(t); return (0, n.createSessionFinalize)(r, e) }, n.releaseSession = t => { const e = (0, s.getInstance)(), r = h.get(t); if (!r) throw new Error("invalid session id"); const i = r[0], d = r[1], g = r[2]; d.forEach(e._OrtFree), g.forEach(e._OrtFree), e._OrtReleaseSession(i), h.delete(t) }; const p = t => { switch (t) { case "int8": return 3; case "uint8": return 2; case "bool": return 9; case "int16": return 5; case "uint16": return 4; case "int32": return 6; case "uint32": return 12; case "float32": return 1; case "float64": return 11; case "string": return 8; case "int64": return 7; case "uint64": return 13; default: throw new Error(`unsupported data type: ${t}`) } }, l = t => { switch (t) { case 3: return "int8"; case 2: return "uint8"; case 9: return "bool"; case 5: return "int16"; case 4: return "uint16"; case 6: return "int32"; case 12: return "uint32"; case 1: return "float32"; case 11: return "float64"; case 8: return "string"; case 7: return "int64"; case 13: return "uint64"; default: throw new Error(`unsupported data type: ${t}`) } }, o = t => { switch (t) { 
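// Module 349 run(): builds run options, copies each input tensor (or a UTF-8 string pointer table) into the wasm heap, creates Ort tensors, calls _OrtRun, then reads back each output's type, dims and data as [type, dims, data] triples before releasing every allocation in the finally blocks.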
case "float32": return Float32Array; case "uint8": case "bool": return Uint8Array; case "int8": return Int8Array; case "uint16": return Uint16Array; case "int16": return Int16Array; case "int32": return Int32Array; case "float64": return Float64Array; case "uint32": return Uint32Array; case "int64": return BigInt64Array; case "uint64": return BigUint64Array; default: throw new Error(`unsupported type: ${t}`) } }; n.run = (t, e, r, i, d) => { const g = (0, s.getInstance)(), m = h.get(t); if (!m) throw new Error("invalid session id"); const _ = m[0], y = m[1], T = m[2], w = e.length, S = i.length; let O = 0, E = []; const v = [], P = []; try { [O, E] = (0, u.setRunOptions)(d); for (let C = 0; C < w; C++) { const $ = r[C][0], X = r[C][1], z = r[C][2]; let Z, J; if (Array.isArray(z)) { J = 4 * z.length, Z = g._malloc(J), P.push(Z); let Te = Z / 4; for (let se = 0; se < z.length; se++) { if (typeof z[se] != "string") throw new TypeError(`tensor data at index ${se} is not a string`); g.HEAPU32[Te++] = (0, f.allocWasmString)(z[se], P) } } else J = z.byteLength, Z = g._malloc(J), P.push(Z), g.HEAPU8.set(new Uint8Array(z.buffer, z.byteOffset, J), Z); const ue = g.stackSave(), Se = g.stackAlloc(4 * X.length); try { let Te = Se / 4; X.forEach(ye => g.HEAP32[Te++] = ye); const se = g._OrtCreateTensor(p($), Z, J, Se, X.length); if (se === 0) throw new Error("Can't create a tensor"); v.push(se) } finally { g.stackRestore(ue) } } const L = g.stackSave(), V = g.stackAlloc(4 * w), R = g.stackAlloc(4 * w), k = g.stackAlloc(4 * S), Y = g.stackAlloc(4 * S); try { let C = V / 4, $ = R / 4, X = k / 4, z = Y / 4; for (let ue = 0; ue < w; ue++) g.HEAPU32[C++] = v[ue], g.HEAPU32[$++] = y[e[ue]]; for (let ue = 0; ue < S; ue++) g.HEAPU32[X++] = 0, g.HEAPU32[z++] = T[i[ue]]; let Z = g._OrtRun(_, R, V, w, Y, S, k, O); const J = []; if (Z === 0) for (let ue = 0; ue < S; ue++) { const Se = g.HEAPU32[k / 4 + ue], Te = g.stackSave(), se = g.stackAlloc(16); let ye, be = 0; try { if (Z = g._OrtGetTensorData(Se, se, se + 4, se + 8, se + 12), Z !== 0) throw new Error(`Can't access output tensor data. error code = ${Z}`); let Ie = se / 4; const Le = g.HEAPU32[Ie++]; be = g.HEAPU32[Ie++]; const ve = g.HEAPU32[Ie++], Ne = g.HEAPU32[Ie++], Fe = []; for (let Oe = 0; Oe < Ne; Oe++) Fe.push(g.HEAPU32[ve / 4 + Oe]); g._OrtFree(ve); const Me = Fe.length === 0 ? 1 : Fe.reduce((Oe, Be) => Oe * Be); if (ye = l(Le), ye === "string") { const Oe = []; let Be = be / 4; for (let Ue = 0; Ue < Me; Ue++) { const ze = g.HEAPU32[Be++], He = Ue === Me - 1 ? void 0 : g.HEAPU32[Be] - ze; Oe.push(g.UTF8ToString(ze, He)) } J.push([ye, Fe, Oe]) } else { const Oe = new(o(ye))(Me); new Uint8Array(Oe.buffer, Oe.byteOffset, Oe.byteLength).set(g.HEAPU8.subarray(be, be + Oe.byteLength)), J.push([ye, Fe, Oe]) } } finally { g.stackRestore(Te), ye === "string" && be && g._free(be), g._OrtReleaseTensor(Se) } } if (Z === 0) return J; throw new Error(`failed to call OrtRun(). 
error code = ${Z}.`) } finally { g.stackRestore(L) } } finally { v.forEach(g._OrtReleaseTensor), P.forEach(g._free), g._OrtReleaseRunOptions(O), E.forEach(g._free) } }, n.endProfiling = t => { const e = (0, s.getInstance)(), r = h.get(t); if (!r) throw new Error("invalid session id"); const i = r[0], d = e._OrtEndProfiling(i); if (d === 0) throw new Error("Can't get an profile file name"); e._OrtFree(d) }, n.extractTransferableBuffers = t => { const e = []; for (const r of t) { const i = r[2]; !Array.isArray(i) && i.buffer && e.push(i.buffer) } return e } }, 6361: function(b, n, a) { var u = this && this.__createBinding || (Object.create ? function(d, g, m, _) { _ === void 0 && (_ = m); var y = Object.getOwnPropertyDescriptor(g, m); y && !("get" in y ? !g.__esModule : y.writable || y.configurable) || (y = { enumerable: !0, get: function() { return g[m] } }), Object.defineProperty(d, _, y) } : function(d, g, m, _) { _ === void 0 && (_ = m), d[_] = g[m] }), c = this && this.__setModuleDefault || (Object.create ? function(d, g) { Object.defineProperty(d, "default", { enumerable: !0, value: g }) } : function(d, g) { d.default = g }), f = this && this.__importStar || function(d) { if (d && d.__esModule) return d; var g = {}; if (d != null) for (var m in d) m !== "default" && Object.prototype.hasOwnProperty.call(d, m) && u(g, d, m); return c(g, d), g }, s = this && this.__importDefault || function(d) { return d && d.__esModule ? d : { default: d } }; Object.defineProperty(n, "__esModule", { value: !0 }), n.dispose = n.getInstance = n.initializeWebAssembly = void 0; const h = f(a(6449)), p = s(a(932)), l = a(3474); let o, t = !1, e = !1, r = !1; const i = (d, g) => g ? d ? "ort-wasm-simd-threaded.wasm" : "ort-wasm-threaded.wasm" : d ? "ort-wasm-simd.wasm" : "ort-wasm.wasm"; n.initializeWebAssembly = async d => { if (t) return Promise.resolve(); if (e) throw new Error("multiple calls to 'initializeWebAssembly()' detected."); if (r) throw new Error("previous call to 'initializeWebAssembly()' failed."); e = !0; const g = d.initTimeout, m = d.numThreads, _ = d.simd, y = m > 1 && (() => { try { return typeof SharedArrayBuffer < "u" && (typeof MessageChannel < "u" && new MessageChannel().port1.postMessage(new SharedArrayBuffer(1)), WebAssembly.validate(new Uint8Array([0, 97, 115, 109, 1, 0, 0, 0, 1, 4, 1, 96, 0, 0, 3, 2, 1, 0, 5, 4, 1, 3, 1, 1, 10, 11, 1, 9, 0, 65, 0, 254, 16, 2, 0, 26, 11]))) } catch { return !1 } })(), T = _ && (() => { try { return WebAssembly.validate(new Uint8Array([0, 97, 115, 109, 1, 0, 0, 0, 1, 4, 1, 96, 0, 0, 3, 2, 1, 0, 10, 30, 1, 28, 0, 65, 0, 253, 15, 253, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 253, 186, 1, 26, 11])) } catch { return !1 } })(), w = typeof d.wasmPaths == "string" ? d.wasmPaths : void 0, S = i(!1, y), O = i(T, y), E = typeof d.wasmPaths == "object" ? d.wasmPaths[O] : void 0; let v = !1; const P = []; if (g > 0 && P.push(new Promise(L => { setTimeout(() => { v = !0, L() }, g) })), P.push(new Promise((L, V) => { const R = y ? l : p.default, k = { locateFile: (Y, C) => y && Y.endsWith(".worker.js") && typeof Blob < "u" ? URL.createObjectURL(new Blob([a(4154)], { type: "text/javascript" })) : Y === S ? E ?? (w ?? 
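// Module 6361 (wasm factory): feature-detects threads (SharedArrayBuffer + atomics probe) and SIMD via WebAssembly.validate, then loads ort-wasm[-simd][-threaded].wasm accordingly; env.wasm.wasmPaths (string prefix or per-file map) overrides where the binary is fetched from, and initTimeout bounds initialization.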
C) + O : C + Y }; if (y) if (typeof Blob > "u") k.mainScriptUrlOrBlob = h.join("/", "ort-wasm-threaded.js"); else { const Y = `var ortWasmThreaded=(function(){var _scriptDir;return ${R.toString()}})();`; k.mainScriptUrlOrBlob = new Blob([Y], { type: "text/javascript" }) } R(k).then(Y => { e = !1, t = !0, o = Y, L() }, Y => { e = !1, r = !0, V(Y) }) })), await Promise.race(P), v) throw new Error(`WebAssembly backend initializing failed due to timeout: ${g}ms`) }, n.getInstance = () => { if (t && o) return o; throw new Error("WebAssembly is not initialized yet.") }, n.dispose = () => { var d; !t || e || r || (e = !0, (d = o.PThread) === null || d === void 0 || d.terminateAllThreads(), o = void 0, e = !1, t = !1, r = !0) } }, 9710: (b, n, a) => { a.d(n, { Z: () => f }); var u = a(477), c = a.n(u); function f() { return c()('/*!\n* ONNX Runtime Web v1.14.0\n* Copyright (c) Microsoft Corporation. All rights reserved.\n* Licensed under the MIT License.\n*/\n(()=>{var t={474:(t,e,n)=>{var _scriptDir,r=(_scriptDir=(_scriptDir="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(t){function e(){return j.buffer!=D&&N(j.buffer),P}function r(){return j.buffer!=D&&N(j.buffer),U}function a(){return j.buffer!=D&&N(j.buffer),F}function i(){return j.buffer!=D&&N(j.buffer),I}function o(){return j.buffer!=D&&N(j.buffer),W}var u,c,s;t=t||{},u||(u=void 0!==t?t:{}),u.ready=new Promise((function(t,e){c=t,s=e}));var l,f,p,h,d,y,b=Object.assign({},u),m="./this.program",g=(t,e)=>{throw e},v="object"==typeof window,w="function"==typeof importScripts,_="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,O=u.ENVIRONMENT_IS_PTHREAD||!1,A="";function S(t){return u.locateFile?u.locateFile(t,A):A+t}if(_){let e;A=w?n(908).dirname(A)+"/":"//",y=()=>{d||(h=n(384),d=n(908))},l=function(t,e){return y(),t=d.normalize(t),h.readFileSync(t,e?void 0:"utf8")},p=t=>((t=l(t,!0)).buffer||(t=new Uint8Array(t)),t),f=(t,e,n)=>{y(),t=d.normalize(t),h.readFile(t,(function(t,r){t?n(t):e(r.buffer)}))},1{if(Q())throw process.exitCode=t,e;e instanceof ct||x("exiting due to exception: "+e),process.exit(t)},u.inspect=function(){return"[Emscripten Module object]"};try{e=n(925)}catch(t){throw console.error(\'The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?\'),t}n.g.Worker=e.Worker}else(v||w)&&(w?A=self.location.href:"undefined"!=typeof document&&document.currentScript&&(A=document.currentScript.src),_scriptDir&&(A=_scriptDir),A=0!==A.indexOf("blob:")?A.substr(0,A.replace(/[?#].*/,"").lastIndexOf("/")+1):"",_||(l=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.send(null),e.responseText},w&&(p=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.responseType="arraybuffer",e.send(null),new Uint8Array(e.response)}),f=(t,e,n)=>{var r=new XMLHttpRequest;r.open("GET",t,!0),r.responseType="arraybuffer",r.onload=()=>{200==r.status||0==r.status&&r.response?e(r.response):n()},r.onerror=n,r.send(null)}));_&&"undefined"==typeof performance&&(n.g.performance=n(953).performance);var T=console.log.bind(console),E=console.warn.bind(console);_&&(y(),T=t=>h.writeSync(1,t+"\\n"),E=t=>h.writeSync(2,t+"\\n"));var M,C=u.print||T,x=u.printErr||E;Object.assign(u,b),b=null,u.thisProgram&&(m=u.thisProgram),u.quit&&(g=u.quit),u.wasmBinary&&(M=u.wasmBinary);var R=u.noExitRuntime||!1;"object"!=typeof WebAssembly&&at("no native wasm support detected");var j,k,D,P,U,F,I,W,H=!1,L="undefined"!=typeof 
TextDecoder?new TextDecoder("utf8"):void 0;function z(t,e,n){var r=(e>>>=0)+n;for(n=e;t[n]&&!(n>=r);)++n;if(16(a=224==(240&a)?(15&a)<<12|i<<6|o:(7&a)<<18|i<<12|o<<6|63&t[e++])?r+=String.fromCharCode(a):(a-=65536,r+=String.fromCharCode(55296|a>>10,56320|1023&a))}}else r+=String.fromCharCode(a)}return r}function Y(t,e){return(t>>>=0)?z(r(),t,e):""}function B(t,e,n,r){if(!(0>>=0;r=n+r-1;for(var i=0;i=o&&(o=65536+((1023&o)<<10)|1023&t.charCodeAt(++i)),127>=o){if(n>=r)break;e[n++>>>0]=o}else{if(2047>=o){if(n+1>=r)break;e[n++>>>0]=192|o>>6}else{if(65535>=o){if(n+2>=r)break;e[n++>>>0]=224|o>>12}else{if(n+3>=r)break;e[n++>>>0]=240|o>>18,e[n++>>>0]=128|o>>12&63}e[n++>>>0]=128|o>>6&63}e[n++>>>0]=128|63&o}}return e[n>>>0]=0,n-a}function G(t){for(var e=0,n=0;n=r?e++:2047>=r?e+=2:55296<=r&&57343>=r?(e+=4,++n):e+=3}return e}function N(t){D=t,u.HEAP8=P=new Int8Array(t),u.HEAP16=new Int16Array(t),u.HEAP32=F=new Int32Array(t),u.HEAPU8=U=new Uint8Array(t),u.HEAPU16=new Uint16Array(t),u.HEAPU32=I=new Uint32Array(t),u.HEAPF32=new Float32Array(t),u.HEAPF64=W=new Float64Array(t)}O&&(D=u.buffer);var V=u.INITIAL_MEMORY||16777216;if(O)j=u.wasmMemory,D=u.buffer;else if(u.wasmMemory)j=u.wasmMemory;else if(!((j=new WebAssembly.Memory({initial:V/65536,maximum:65536,shared:!0})).buffer instanceof SharedArrayBuffer))throw x("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"),_&&console.log("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and also use a recent version)"),Error("bad memory");j&&(D=j.buffer),V=D.byteLength,N(D);var $,q=[],X=[],J=[],Z=[];function Q(){return R||!1}function K(){var t=u.preRun.shift();q.unshift(t)}var tt,et=0,nt=null,rt=null;function at(t){throw O?postMessage({cmd:"onAbort",arg:t}):u.onAbort&&u.onAbort(t),x(t="Aborted("+t+")"),H=!0,t=new WebAssembly.RuntimeError(t+". 
Build with -sASSERTIONS for more info."),s(t),t}function it(){return tt.startsWith("data:application/octet-stream;base64,")}function ot(){var t=tt;try{if(t==tt&&M)return new Uint8Array(M);if(p)return p(t);throw"both async and sync fetching of the wasm failed"}catch(t){at(t)}}tt="ort-wasm-threaded.wasm",it()||(tt=S(tt));var ut={};function ct(t){this.name="ExitStatus",this.message="Program terminated with exit("+t+")",this.status=t}function st(t){(t=ht.Vb[t])||at(),ht.mc(t)}function lt(t){var e=ht.Cc();if(!e)return 6;ht.ac.push(e),ht.Vb[t.Ub]=e,e.Ub=t.Ub;var n={cmd:"run",start_routine:t.Ic,arg:t.zc,pthread_ptr:t.Ub};return e.$b=()=>{n.time=performance.now(),e.postMessage(n,t.Nc)},e.loaded&&(e.$b(),delete e.$b),0}function ft(t){if(O)return $t(1,1,t);Q()||(ht.oc(),u.onExit&&u.onExit(t),H=!0),g(t,new ct(t))}function pt(t,e){if(!e&&O)throw bt(t),"unwind";Q()||O||(me(),dt(J),be(0),re[1].length&&ae(1,10),re[2].length&&ae(2,10),ht.oc()),ft(t)}var ht={Yb:[],ac:[],qc:[],Vb:{},fc:function(){O&&ht.Ec()},Pc:function(){},Ec:function(){ht.receiveObjectTransfer=ht.Gc,ht.threadInitTLS=ht.pc,ht.setExitStatus=ht.nc,R=!1},nc:function(){},oc:function(){for(var t of Object.values(ht.Vb))ht.mc(t);for(t of ht.Yb)t.terminate();ht.Yb=[]},mc:function(t){var e=t.Ub;delete ht.Vb[e],ht.Yb.push(t),ht.ac.splice(ht.ac.indexOf(t),1),t.Ub=0,Oe(e)},Gc:function(){},pc:function(){ht.qc.forEach((t=>t()))},Fc:function(t,e){t.onmessage=n=>{var r=(n=n.data).cmd;if(t.Ub&&(ht.Bc=t.Ub),n.targetThread&&n.targetThread!=he()){var a=ht.Vb[n.Qc];a?a.postMessage(n,n.transferList):x(\'Internal error! Worker sent a message "\'+r+\'" to target pthread \'+n.targetThread+", but that thread no longer exists!")}else"processProxyingQueue"===r?zt(n.queue):"spawnThread"===r?lt(n):"cleanupThread"===r?st(n.thread):"killThread"===r?(n=n.thread,r=ht.Vb[n],delete ht.Vb[n],r.terminate(),Oe(n),ht.ac.splice(ht.ac.indexOf(r),1),r.Ub=0):"cancelThread"===r?ht.Vb[n.thread].postMessage({cmd:"cancel"}):"loaded"===r?(t.loaded=!0,e&&e(t),t.$b&&(t.$b(),delete t.$b)):"print"===r?C("Thread "+n.threadId+": "+n.text):"printErr"===r?x("Thread "+n.threadId+": "+n.text):"alert"===r?alert("Thread "+n.threadId+": "+n.text):"setimmediate"===n.target?t.postMessage(n):"onAbort"===r?u.onAbort&&u.onAbort(n.arg):r&&x("worker sent an unknown command "+r);ht.Bc=void 0},t.onerror=t=>{throw x("worker sent an error! 
"+t.filename+":"+t.lineno+": "+t.message),t},_&&(t.on("message",(function(e){t.onmessage({data:e})})),t.on("error",(function(e){t.onerror(e)})),t.on("detachedExit",(function(){}))),t.postMessage({cmd:"load",urlOrBlob:u.mainScriptUrlOrBlob||_scriptDir,wasmMemory:j,wasmModule:k})},yc:function(){var t=S("ort-wasm-threaded.worker.js");ht.Yb.push(new Worker(t))},Cc:function(){return 0==ht.Yb.length&&(ht.yc(),ht.Fc(ht.Yb[0])),ht.Yb.pop()}};function dt(t){for(;0>2>>>0];t=a()[t+48>>2>>>0],Te(e,e-t),Me(e)};var mt=[];function gt(t){var e=mt[t];return e||(t>=mt.length&&(mt.length=t+1),mt[t]=e=$.get(t)),e}u.invokeEntryPoint=function(t,e){t=gt(t)(e),Q()?ht.nc(t):Ae(t)};var vt,wt,_t=[],Ot=0,At=0;function St(t){this.Zb=t,this.Sb=t-24,this.xc=function(t){i()[this.Sb+4>>2>>>0]=t},this.bc=function(){return i()[this.Sb+4>>2>>>0]},this.wc=function(t){i()[this.Sb+8>>2>>>0]=t},this.Dc=function(){return i()[this.Sb+8>>2>>>0]},this.rc=function(){a()[this.Sb>>2>>>0]=0},this.hc=function(t){t=t?1:0,e()[this.Sb+12>>0>>>0]=t},this.uc=function(){return 0!=e()[this.Sb+12>>0>>>0]},this.ic=function(t){t=t?1:0,e()[this.Sb+13>>0>>>0]=t},this.kc=function(){return 0!=e()[this.Sb+13>>0>>>0]},this.fc=function(t,e){this.cc(0),this.xc(t),this.wc(e),this.rc(),this.hc(!1),this.ic(!1)},this.sc=function(){Atomics.add(a(),this.Sb>>2,1)},this.Hc=function(){return 1===Atomics.sub(a(),this.Sb>>2,1)},this.cc=function(t){i()[this.Sb+16>>2>>>0]=t},this.tc=function(){return i()[this.Sb+16>>2>>>0]},this.vc=function(){if(Re(this.bc()))return i()[this.Zb>>2>>>0];var t=this.tc();return 0!==t?t:this.Zb}}function Tt(t){return ye(new St(t).Sb)}function Et(t,e,n,r){return O?$t(3,1,t,e,n,r):Mt(t,e,n,r)}function Mt(t,e,n,r){if("undefined"==typeof SharedArrayBuffer)return x("Current environment does not support SharedArrayBuffer, pthreads are not available!"),6;var a=[];return O&&0===a.length?Et(t,e,n,r):(t={Ic:n,Ub:t,zc:r,Nc:a},O?(t.Oc="spawnThread",postMessage(t,a),0):lt(t))}function Ct(t,e,n){return O?$t(4,1,t,e,n):0}function xt(t,e){if(O)return $t(5,1,t,e)}function Rt(t,e){if(O)return $t(6,1,t,e)}function jt(t,e,n){if(O)return $t(7,1,t,e,n)}function kt(t,e,n){return O?$t(8,1,t,e,n):0}function Dt(t,e){if(O)return $t(9,1,t,e)}function Pt(t,e,n){if(O)return $t(10,1,t,e,n)}function Ut(t,e,n,r){if(O)return $t(11,1,t,e,n,r)}function Ft(t,e,n,r){if(O)return $t(12,1,t,e,n,r)}function It(t,e,n,r){if(O)return $t(13,1,t,e,n,r)}function Wt(t){if(O)return $t(14,1,t)}function Ht(t,e){if(O)return $t(15,1,t,e)}function Lt(t,e,n){if(O)return $t(16,1,t,e,n)}function zt(t){Atomics.store(a(),t>>2,1),he()&&_e(t),Atomics.compareExchange(a(),t>>2,1,0)}function Yt(t){return i()[t>>>2]+4294967296*a()[t+4>>>2]}function Bt(t,e,n,r,a,i){return O?$t(17,1,t,e,n,r,a,i):-52}function Gt(t,e,n,r,a,i){if(O)return $t(18,1,t,e,n,r,a,i)}function Nt(t){var n=G(t)+1,r=de(n);return r&&B(t,e(),r,n),r}function Vt(t,e,n){function r(t){return(t=t.toTimeString().match(/\\(([A-Za-z ]+)\\)$/))?t[1]:"GMT"}if(O)return $t(19,1,t,e,n);var o=(new Date).getFullYear(),u=new Date(o,0,1),c=new Date(o,6,1);o=u.getTimezoneOffset();var s=c.getTimezoneOffset(),l=Math.max(o,s);a()[t>>2>>>0]=60*l,a()[e>>2>>>0]=Number(o!=s),t=r(u),e=r(c),t=Nt(t),e=Nt(e),s>2>>>0]=t,i()[n+4>>2>>>0]=e):(i()[n>>2>>>0]=e,i()[n+4>>2>>>0]=t)}function $t(t,e){var n=arguments.length-2,r=arguments;return yt((()=>{for(var a=Ce(8*n),i=a>>3,u=0;u>>0]=c}return we(t,n,a,e)}))}u.executeNotifiedProxyingQueue=zt,wt=_?()=>{var t=process.hrtime();return 
1e3*t[0]+t[1]/1e6}:O?()=>performance.now()-u.__performance_now_clock_drift:()=>performance.now();var qt,Xt=[],Jt={};function Zt(){if(!qt){var t,e={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:m||"./this.program"};for(t in Jt)void 0===Jt[t]?delete e[t]:e[t]=Jt[t];var n=[];for(t in e)n.push(t+"="+e[t]);qt=n}return qt}function Qt(t,n){if(O)return $t(20,1,t,n);var r=0;return Zt().forEach((function(a,o){var u=n+r;for(o=i()[t+4*o>>2>>>0]=u,u=0;u>0>>>0]=a.charCodeAt(u);e()[o>>0>>>0]=0,r+=a.length+1})),0}function Kt(t,e){if(O)return $t(21,1,t,e);var n=Zt();i()[t>>2>>>0]=n.length;var r=0;return n.forEach((function(t){r+=t.length+1})),i()[e>>2>>>0]=r,0}function te(t){return O?$t(22,1,t):52}function ee(t,e,n,r){return O?$t(23,1,t,e,n,r):52}function ne(t,e,n,r,a){return O?$t(24,1,t,e,n,r,a):70}var re=[null,[],[]];function ae(t,e){var n=re[t];0===e||10===e?((1===t?C:x)(z(n,0)),n.length=0):n.push(e)}function ie(t,e,n,a){if(O)return $t(25,1,t,e,n,a);for(var o=0,u=0;u>2>>>0],s=i()[e+4>>2>>>0];e+=8;for(var l=0;l>>0]);o+=s}return i()[a>>2>>>0]=o,0}var oe=0;function ue(t){return 0==t%4&&(0!=t%100||0==t%400)}var ce=[31,29,31,30,31,30,31,31,30,31,30,31],se=[31,28,31,30,31,30,31,31,30,31,30,31];function le(t,n,r,i){function o(t,e,n){for(t="number"==typeof t?t.toString():t||"";t.lengtht?-1:0r-t.getDate())){t.setDate(t.getDate()+e);break}e-=r-t.getDate()+1,t.setDate(1),11>n?t.setMonth(n+1):(t.setMonth(0),t.setFullYear(t.getFullYear()+1))}return n=new Date(t.getFullYear()+1,0,4),e=s(new Date(t.getFullYear(),0,4)),n=s(n),0>=c(e,t)?0>=c(n,t)?t.getFullYear()+1:t.getFullYear():t.getFullYear()-1}var f=a()[i+40>>2>>>0];for(var p in i={Lc:a()[i>>2>>>0],Kc:a()[i+4>>2>>>0],dc:a()[i+8>>2>>>0],jc:a()[i+12>>2>>>0],ec:a()[i+16>>2>>>0],Xb:a()[i+20>>2>>>0],Tb:a()[i+24>>2>>>0],Wb:a()[i+28>>2>>>0],Rc:a()[i+32>>2>>>0],Jc:a()[i+36>>2>>>0],Mc:f?Y(f):""},r=Y(r),f={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})r=r.replace(new RegExp(p,"g"),f[p]);var h="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),d="January February March April May June July August September October November December".split(" ");for(p in f={"%a":function(t){return h[t.Tb].substring(0,3)},"%A":function(t){return h[t.Tb]},"%b":function(t){return d[t.ec].substring(0,3)},"%B":function(t){return d[t.ec]},"%C":function(t){return u((t.Xb+1900)/100|0,2)},"%d":function(t){return u(t.jc,2)},"%e":function(t){return o(t.jc,2," ")},"%g":function(t){return l(t).toString().substring(2)},"%G":function(t){return l(t)},"%H":function(t){return u(t.dc,2)},"%I":function(t){return 0==(t=t.dc)?t=12:12t.dc?"AM":"PM"},"%S":function(t){return u(t.Lc,2)},"%t":function(){return"\\t"},"%u":function(t){return t.Tb||7},"%U":function(t){return u(Math.floor((t.Wb+7-t.Tb)/7),2)},"%V":function(t){var e=Math.floor((t.Wb+7-(t.Tb+6)%7)/7);if(2>=(t.Tb+371-t.Wb-2)%7&&e++,e)53==e&&(4==(n=(t.Tb+371-t.Wb)%7)||3==n&&ue(t.Xb)||(e=1));else{e=52;var n=(t.Tb+7-t.Wb-1)%7;(4==n||5==n&&ue(t.Xb%400-1))&&e++}return u(e,2)},"%w":function(t){return t.Tb},"%W":function(t){return 
u(Math.floor((t.Wb+7-(t.Tb+6)%7)/7),2)},"%y":function(t){return(t.Xb+1900).toString().substring(2)},"%Y":function(t){return t.Xb+1900},"%z":function(t){var e=0<=(t=t.Jc);return t=Math.abs(t)/60,(e?"+":"-")+String("0000"+(t/60*100+t%60)).slice(-4)},"%Z":function(t){return t.Mc},"%%":function(){return"%"}},r=r.replace(/%%/g,"\\0\\0"),f)r.includes(p)&&(r=r.replace(new RegExp(p,"g"),f[p](i)));return p=function(t){var e=Array(G(t)+1);return B(t,e,0,e.length),e}(r=r.replace(/\\0\\0/g,"%")),p.length>n?0:(function(t,n){e().set(t,n>>>0)}(p,t),p.length-1)}ht.fc();var fe=[null,ft,bt,Et,Ct,xt,Rt,jt,kt,Dt,Pt,Ut,Ft,It,Wt,Ht,Lt,Bt,Gt,Vt,Qt,Kt,te,ee,ne,ie],pe={b:function(t){return de(t+24)+24},n:function(t){return(t=new St(t)).uc()||(t.hc(!0),Ot--),t.ic(!1),_t.push(t),t.sc(),t.vc()},ma:function(t){throw x("Unexpected exception thrown, this is not properly supported - aborting"),H=!0,t},x:function(){Se(0);var t=_t.pop();if(t.Hc()&&!t.kc()){var e=t.Dc();e&>(e)(t.Zb),Tt(t.Zb)}At=0},e:function(){var t=At;if(!t)return oe=0;var e=new St(t);e.cc(t);var n=e.bc();if(!n)return oe=0,t;for(var r=Array.prototype.slice.call(arguments),a=0;azt(r)));else if(O)postMessage({targetThread:t,cmd:"processProxyingQueue",queue:r});else{if(!(t=ht.Vb[t]))return;t.postMessage({cmd:"processProxyingQueue",queue:r})}return 1},Ea:function(){return-1},Pa:function(t,e){t=new Date(1e3*Yt(t)),a()[e>>2>>>0]=t.getUTCSeconds(),a()[e+4>>2>>>0]=t.getUTCMinutes(),a()[e+8>>2>>>0]=t.getUTCHours(),a()[e+12>>2>>>0]=t.getUTCDate(),a()[e+16>>2>>>0]=t.getUTCMonth(),a()[e+20>>2>>>0]=t.getUTCFullYear()-1900,a()[e+24>>2>>>0]=t.getUTCDay(),t=(t.getTime()-Date.UTC(t.getUTCFullYear(),0,1,0,0,0,0))/864e5|0,a()[e+28>>2>>>0]=t},Qa:function(t,e){t=new Date(1e3*Yt(t)),a()[e>>2>>>0]=t.getSeconds(),a()[e+4>>2>>>0]=t.getMinutes(),a()[e+8>>2>>>0]=t.getHours(),a()[e+12>>2>>>0]=t.getDate(),a()[e+16>>2>>>0]=t.getMonth(),a()[e+20>>2>>>0]=t.getFullYear()-1900,a()[e+24>>2>>>0]=t.getDay();var n=new Date(t.getFullYear(),0,1),r=(t.getTime()-n.getTime())/864e5|0;a()[e+28>>2>>>0]=r,a()[e+36>>2>>>0]=-60*t.getTimezoneOffset(),r=new Date(t.getFullYear(),6,1).getTimezoneOffset(),t=0|(r!=(n=n.getTimezoneOffset())&&t.getTimezoneOffset()==Math.min(n,r)),a()[e+32>>2>>>0]=t},Ra:function(t){var e=new Date(a()[t+20>>2>>>0]+1900,a()[t+16>>2>>>0],a()[t+12>>2>>>0],a()[t+8>>2>>>0],a()[t+4>>2>>>0],a()[t>>2>>>0],0),n=a()[t+32>>2>>>0],r=e.getTimezoneOffset(),i=new Date(e.getFullYear(),0,1),o=new Date(e.getFullYear(),6,1).getTimezoneOffset(),u=i.getTimezoneOffset(),c=Math.min(u,o);return 0>n?a()[t+32>>2>>>0]=Number(o!=u&&c==r):0>2>>>0]=e.getDay(),n=(e.getTime()-i.getTime())/864e5|0,a()[t+28>>2>>>0]=n,a()[t>>2>>>0]=e.getSeconds(),a()[t+4>>2>>>0]=e.getMinutes(),a()[t+8>>2>>>0]=e.getHours(),a()[t+12>>2>>>0]=e.getDate(),a()[t+16>>2>>>0]=e.getMonth(),e.getTime()/1e3|0},Aa:Bt,Ba:Gt,Sa:function t(e,n,r){t.Ac||(t.Ac=!0,Vt(e,n,r))},y:function(){at("")},U:function(){if(!_&&!w){var t="Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread";vt||(vt={}),vt[t]||(vt[t]=1,_&&(t="warning: "+t),x(t))}},ra:function(){return 4294901760},B:wt,Ia:function(t,e,n){r().copyWithin(t>>>0,e>>>0,e+n>>>0)},F:function(){return _?n(993).cpus().length:navigator.hardwareConcurrency},Da:function(t,e,n){Xt.length=e,n>>=3;for(var r=0;r>>0];return(0>t?ut[-t-1]:fe[t]).apply(null,Xt)},qa:function(t){var e=r().length;if((t>>>=0)<=e||4294901760=n;n*=2){var a=e*(1+.2/n);a=Math.min(a,t+100663296);var 
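/* Editor's note (sketch of the technique, not the bundle's exact code): the
   function this comment sits inside implements emscripten's heap growth. It
   tries up to three times with a shrinking over-allocation factor, caps the
   request at 4 GiB minus one 64 KiB page, rounds up to a page multiple, and
   grows the WebAssembly.Memory by the delta in pages:

     function growHeap(memory, currentBytes, requestedBytes) {
       const MAX = 4294901760;                        // 4 GiB - 64 KiB
       if (requestedBytes > MAX) return false;
       for (let cut = 1; cut <= 4; cut *= 2) {        // cut = 1, 2, 4
         let size = currentBytes * (1 + 0.2 / cut);   // speculative over-allocation
         size = Math.min(size, requestedBytes + 100663296);
         size = Math.max(requestedBytes, size);
         size = Math.min(MAX, size + (65536 - (size % 65536)) % 65536);
         try {
           memory.grow((size - memory.buffer.byteLength + 65535) >>> 16);
           return true;
         } catch (e) {
           // grow failed - retry with a smaller over-allocation
         }
       }
       return false;
     }
*/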
i=Math;a=Math.max(t,a),i=i.min.call(i,4294901760,a+(65536-a%65536)%65536);t:{try{j.grow(i-D.byteLength+65535>>>16),N(j.buffer);var o=1;break t}catch(t){}o=void 0}if(o)return!0}return!1},Na:function(){throw"unwind"},Ga:Qt,Ha:Kt,J:pt,I:te,S:ee,ga:ne,R:ie,d:function(){return oe},na:function t(r,a){t.lc||(t.lc=function(){if("object"==typeof crypto&&"function"==typeof crypto.getRandomValues){var t=new Uint8Array(1);return()=>(crypto.getRandomValues(t),t[0])}if(_)try{var e=n(Object(function(){var t=new Error("Cannot find module \'crypto\'");throw t.code="MODULE_NOT_FOUND",t}()));return()=>e.randomBytes(1)[0]}catch(t){}return()=>at("randomDevice")}());for(var i=0;i>0>>>0]=t.lc();return 0},ia:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},ja:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},K:function(t){var e=Ee();try{return gt(t)()}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},f:function(t,e){var n=Ee();try{return gt(t)(e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},P:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},Q:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},k:function(t,e,n){var r=Ee();try{return gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},p:function(t,e,n,r){var a=Ee();try{return gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},q:function(t,e,n,r,a){var i=Ee();try{return gt(t)(e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},N:function(t,e,n,r,a,i){var o=Ee();try{return gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},s:function(t,e,n,r,a,i){var o=Ee();try{return gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},w:function(t,e,n,r,a,i,o){var u=Ee();try{return gt(t)(e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},L:function(t,e,n,r,a,i,o,u){var c=Ee();try{return gt(t)(e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},E:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=Ee();try{return gt(t)(e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(Me(p),t!==t+0)throw t;Se(1,0)}},aa:function(t,e,n,r,a,i,o,u){var c=Ee();try{return He(t,e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},_:function(t,e,n,r,a,i,o){var u=Ee();try{return ke(t,e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw t;Se(1,0)}},Z:function(t,e,n,r,a){var i=Ee();try{return Le(t,e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},ca:function(t,e,n,r){var a=Ee();try{return Ie(t,e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},$:function(t){var e=Ee();try{return je(t)}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},ba:function(t,e){var n=Ee();try{return We(t,e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},Y:function(t,e,n){var r=Ee();try{return De(t,e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},g:function(t){var e=Ee();try{gt(t)()}catch(t){if(Me(e),t!==t+0)throw t;Se(1,0)}},r:function(t,e){var n=Ee();try{gt(t)(e)}catch(t){if(Me(n),t!==t+0)throw t;Se(1,0)}},i:function(t,e,n){var r=Ee();try{gt(t)(e,n)}catch(t){if(Me(r),t!==t+0)throw t;Se(1,0)}},ha:function(t,e,n,r){var a=Ee();try{gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},m:function(t,e,n,r){var a=Ee();try{gt(t)(e,n,r)}catch(t){if(Me(a),t!==t+0)throw t;Se(1,0)}},v:function(t,e,n,r,a){var i=Ee();try{gt(t)(e,n,r,a)}catch(t){if(Me(i),t!==t+0)throw t;Se(1,0)}},u:function(t,e,n,r,a,i){var o=Ee();try{gt(t)(e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},O:function(t,e,n,r,a,i,o){var u=Ee();try{gt(t)(e,n,r,a,i,o)}catch(t){if(Me(u),t!==t+0)throw 
t;Se(1,0)}},A:function(t,e,n,r,a,i,o,u){var c=Ee();try{gt(t)(e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},ka:function(t,e,n,r,a,i,o,u,c){var s=Ee();try{gt(t)(e,n,r,a,i,o,u,c)}catch(t){if(Me(s),t!==t+0)throw t;Se(1,0)}},C:function(t,e,n,r,a,i,o,u,c,s,l){var f=Ee();try{gt(t)(e,n,r,a,i,o,u,c,s,l)}catch(t){if(Me(f),t!==t+0)throw t;Se(1,0)}},D:function(t,e,n,r,a,i,o,u,c,s,l,f,p,h,d,y){var b=Ee();try{gt(t)(e,n,r,a,i,o,u,c,s,l,f,p,h,d,y)}catch(t){if(Me(b),t!==t+0)throw t;Se(1,0)}},fa:function(t,e,n,r,a,i,o,u){var c=Ee();try{Pe(t,e,n,r,a,i,o,u)}catch(t){if(Me(c),t!==t+0)throw t;Se(1,0)}},da:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=Ee();try{Fe(t,e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(Me(p),t!==t+0)throw t;Se(1,0)}},ea:function(t,e,n,r,a,i){var o=Ee();try{Ue(t,e,n,r,a,i)}catch(t){if(Me(o),t!==t+0)throw t;Se(1,0)}},o:function(t){return t},a:j||u.wasmMemory,G:function(t){oe=t},la:le,z:function(t,e,n,r){return le(t,e,n,r)}};!function(){function t(t,e){u.asm=t.exports,ht.qc.push(u.asm.sb),$=u.asm.ub,X.unshift(u.asm.Va),k=e,O||(et--,u.monitorRunDependencies&&u.monitorRunDependencies(et),0==et&&(null!==nt&&(clearInterval(nt),nt=null),rt&&(t=rt,rt=null,t())))}function e(e){t(e.instance,e.module)}function n(t){return function(){if(!M&&(v||w)){if("function"==typeof fetch&&!tt.startsWith("file://"))return fetch(tt,{credentials:"same-origin"}).then((function(t){if(!t.ok)throw"failed to load wasm binary file at \'"+tt+"\'";return t.arrayBuffer()})).catch((function(){return ot()}));if(f)return new Promise((function(t,e){f(tt,(function(e){t(new Uint8Array(e))}),e)}))}return Promise.resolve().then((function(){return ot()}))}().then((function(t){return WebAssembly.instantiate(t,r)})).then((function(t){return t})).then(t,(function(t){x("failed to asynchronously prepare wasm: "+t),at(t)}))}var r={a:pe};if(O||(et++,u.monitorRunDependencies&&u.monitorRunDependencies(et)),u.instantiateWasm)try{return u.instantiateWasm(r,t)}catch(t){return x("Module.instantiateWasm callback failed with error: "+t),!1}(M||"function"!=typeof WebAssembly.instantiateStreaming||it()||tt.startsWith("file://")||_||"function"!=typeof fetch?n(e):fetch(tt,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,r).then(e,(function(t){return x("wasm streaming compile failed: "+t),x("falling back to ArrayBuffer 
instantiation"),n(e)}))}))).catch(s)}(),u.___wasm_call_ctors=function(){return(u.___wasm_call_ctors=u.asm.Va).apply(null,arguments)},u._OrtInit=function(){return(u._OrtInit=u.asm.Wa).apply(null,arguments)},u._OrtCreateSessionOptions=function(){return(u._OrtCreateSessionOptions=u.asm.Xa).apply(null,arguments)},u._OrtAppendExecutionProvider=function(){return(u._OrtAppendExecutionProvider=u.asm.Ya).apply(null,arguments)},u._OrtAddSessionConfigEntry=function(){return(u._OrtAddSessionConfigEntry=u.asm.Za).apply(null,arguments)},u._OrtReleaseSessionOptions=function(){return(u._OrtReleaseSessionOptions=u.asm._a).apply(null,arguments)},u._OrtCreateSession=function(){return(u._OrtCreateSession=u.asm.$a).apply(null,arguments)},u._OrtReleaseSession=function(){return(u._OrtReleaseSession=u.asm.ab).apply(null,arguments)},u._OrtGetInputCount=function(){return(u._OrtGetInputCount=u.asm.bb).apply(null,arguments)},u._OrtGetOutputCount=function(){return(u._OrtGetOutputCount=u.asm.cb).apply(null,arguments)},u._OrtGetInputName=function(){return(u._OrtGetInputName=u.asm.db).apply(null,arguments)},u._OrtGetOutputName=function(){return(u._OrtGetOutputName=u.asm.eb).apply(null,arguments)},u._OrtFree=function(){return(u._OrtFree=u.asm.fb).apply(null,arguments)},u._OrtCreateTensor=function(){return(u._OrtCreateTensor=u.asm.gb).apply(null,arguments)},u._OrtGetTensorData=function(){return(u._OrtGetTensorData=u.asm.hb).apply(null,arguments)},u._OrtReleaseTensor=function(){return(u._OrtReleaseTensor=u.asm.ib).apply(null,arguments)},u._OrtCreateRunOptions=function(){return(u._OrtCreateRunOptions=u.asm.jb).apply(null,arguments)},u._OrtAddRunConfigEntry=function(){return(u._OrtAddRunConfigEntry=u.asm.kb).apply(null,arguments)},u._OrtReleaseRunOptions=function(){return(u._OrtReleaseRunOptions=u.asm.lb).apply(null,arguments)},u._OrtRun=function(){return(u._OrtRun=u.asm.mb).apply(null,arguments)},u._OrtEndProfiling=function(){return(u._OrtEndProfiling=u.asm.nb).apply(null,arguments)};var he=u._pthread_self=function(){return(he=u._pthread_self=u.asm.ob).apply(null,arguments)},de=u._malloc=function(){return(de=u._malloc=u.asm.pb).apply(null,arguments)},ye=u._free=function(){return(ye=u._free=u.asm.qb).apply(null,arguments)},be=u._fflush=function(){return(be=u._fflush=u.asm.rb).apply(null,arguments)};u.__emscripten_tls_init=function(){return(u.__emscripten_tls_init=u.asm.sb).apply(null,arguments)};var me=u.___funcs_on_exit=function(){return(me=u.___funcs_on_exit=u.asm.tb).apply(null,arguments)},ge=u.__emscripten_thread_init=function(){return(ge=u.__emscripten_thread_init=u.asm.vb).apply(null,arguments)};u.__emscripten_thread_crashed=function(){return(u.__emscripten_thread_crashed=u.asm.wb).apply(null,arguments)};var 
ve,we=u._emscripten_run_in_main_runtime_thread_js=function(){return(we=u._emscripten_run_in_main_runtime_thread_js=u.asm.xb).apply(null,arguments)},_e=u.__emscripten_proxy_execute_task_queue=function(){return(_e=u.__emscripten_proxy_execute_task_queue=u.asm.yb).apply(null,arguments)},Oe=u.__emscripten_thread_free_data=function(){return(Oe=u.__emscripten_thread_free_data=u.asm.zb).apply(null,arguments)},Ae=u.__emscripten_thread_exit=function(){return(Ae=u.__emscripten_thread_exit=u.asm.Ab).apply(null,arguments)},Se=u._setThrew=function(){return(Se=u._setThrew=u.asm.Bb).apply(null,arguments)},Te=u._emscripten_stack_set_limits=function(){return(Te=u._emscripten_stack_set_limits=u.asm.Cb).apply(null,arguments)},Ee=u.stackSave=function(){return(Ee=u.stackSave=u.asm.Db).apply(null,arguments)},Me=u.stackRestore=function(){return(Me=u.stackRestore=u.asm.Eb).apply(null,arguments)},Ce=u.stackAlloc=function(){return(Ce=u.stackAlloc=u.asm.Fb).apply(null,arguments)},xe=u.___cxa_can_catch=function(){return(xe=u.___cxa_can_catch=u.asm.Gb).apply(null,arguments)},Re=u.___cxa_is_pointer_type=function(){return(Re=u.___cxa_is_pointer_type=u.asm.Hb).apply(null,arguments)},je=u.dynCall_j=function(){return(je=u.dynCall_j=u.asm.Ib).apply(null,arguments)},ke=u.dynCall_iiiiij=function(){return(ke=u.dynCall_iiiiij=u.asm.Jb).apply(null,arguments)},De=u.dynCall_jii=function(){return(De=u.dynCall_jii=u.asm.Kb).apply(null,arguments)},Pe=u.dynCall_viiiiij=function(){return(Pe=u.dynCall_viiiiij=u.asm.Lb).apply(null,arguments)},Ue=u.dynCall_vjji=function(){return(Ue=u.dynCall_vjji=u.asm.Mb).apply(null,arguments)},Fe=u.dynCall_viiijjjii=function(){return(Fe=u.dynCall_viiijjjii=u.asm.Nb).apply(null,arguments)},Ie=u.dynCall_iij=function(){return(Ie=u.dynCall_iij=u.asm.Ob).apply(null,arguments)},We=u.dynCall_ji=function(){return(We=u.dynCall_ji=u.asm.Pb).apply(null,arguments)},He=u.dynCall_iiiiiij=function(){return(He=u.dynCall_iiiiiij=u.asm.Qb).apply(null,arguments)},Le=u.dynCall_iiij=function(){return(Le=u.dynCall_iiij=u.asm.Rb).apply(null,arguments)};function ze(){function t(){if(!ve&&(ve=!0,u.calledRun=!0,!H)&&(O||dt(X),c(u),u.onRuntimeInitialized&&u.onRuntimeInitialized(),!O)){if(u.postRun)for("function"==typeof u.postRun&&(u.postRun=[u.postRun]);u.postRun.length;){var t=u.postRun.shift();Z.unshift(t)}dt(Z)}}if(!(0{var _scriptDir,r=(_scriptDir=(_scriptDir="undefined"!=typeof document&&document.currentScript?document.currentScript.src:void 0)||"/index.js",function(t){var e,r,a;t=t||{},e||(e=void 0!==t?t:{}),e.ready=new Promise((function(t,e){r=t,a=e}));var i,o,u,c,s,l,f=Object.assign({},e),p="./this.program",h=(t,e)=>{throw e},d="object"==typeof window,y="function"==typeof importScripts,b="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node,m="";b?(m=y?n(908).dirname(m)+"/":"//",l=()=>{s||(c=n(384),s=n(908))},i=function(t,e){return l(),t=s.normalize(t),c.readFileSync(t,e?void 0:"utf8")},u=t=>((t=i(t,!0)).buffer||(t=new Uint8Array(t)),t),o=(t,e,n)=>{l(),t=s.normalize(t),c.readFile(t,(function(t,r){t?n(t):e(r.buffer)}))},1{if(_||0{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.send(null),e.responseText},y&&(u=t=>{var e=new XMLHttpRequest;return e.open("GET",t,!1),e.responseType="arraybuffer",e.send(null),new Uint8Array(e.response)}),o=(t,e,n)=>{var r=new XMLHttpRequest;r.open("GET",t,!0),r.responseType="arraybuffer",r.onload=()=>{200==r.status||0==r.status&&r.response?e(r.response):n()},r.onerror=n,r.send(null)});var 
g,v=e.print||console.log.bind(console),w=e.printErr||console.warn.bind(console);Object.assign(e,f),f=null,e.thisProgram&&(p=e.thisProgram),e.quit&&(h=e.quit),e.wasmBinary&&(g=e.wasmBinary);var _=e.noExitRuntime||!1;"object"!=typeof WebAssembly&&V("no native wasm support detected");var O,A,S,T,E,M,C=!1,x="undefined"!=typeof TextDecoder?new TextDecoder("utf8"):void 0;function R(t,e,n){var r=(e>>>=0)+n;for(n=e;t[n]&&!(n>=r);)++n;if(16(a=224==(240&a)?(15&a)<<12|i<<6|o:(7&a)<<18|i<<12|o<<6|63&t[e++])?r+=String.fromCharCode(a):(a-=65536,r+=String.fromCharCode(55296|a>>10,56320|1023&a))}}else r+=String.fromCharCode(a)}return r}function j(t,e){return(t>>>=0)?R(T,t,e):""}function k(t,e,n,r){if(!(0>>=0;r=n+r-1;for(var i=0;i=o&&(o=65536+((1023&o)<<10)|1023&t.charCodeAt(++i)),127>=o){if(n>=r)break;e[n++>>>0]=o}else{if(2047>=o){if(n+1>=r)break;e[n++>>>0]=192|o>>6}else{if(65535>=o){if(n+2>=r)break;e[n++>>>0]=224|o>>12}else{if(n+3>=r)break;e[n++>>>0]=240|o>>18,e[n++>>>0]=128|o>>12&63}e[n++>>>0]=128|o>>6&63}e[n++>>>0]=128|63&o}}return e[n>>>0]=0,n-a}function D(t){for(var e=0,n=0;n=r?e++:2047>=r?e+=2:55296<=r&&57343>=r?(e+=4,++n):e+=3}return e}function P(){var t=O.buffer;A=t,e.HEAP8=S=new Int8Array(t),e.HEAP16=new Int16Array(t),e.HEAP32=E=new Int32Array(t),e.HEAPU8=T=new Uint8Array(t),e.HEAPU16=new Uint16Array(t),e.HEAPU32=M=new Uint32Array(t),e.HEAPF32=new Float32Array(t),e.HEAPF64=new Float64Array(t)}var U,F=[],I=[],W=[],H=[],L=0;function z(){var t=e.preRun.shift();F.unshift(t)}var Y,B=0,G=null,N=null;function V(t){throw e.onAbort&&e.onAbort(t),w(t="Aborted("+t+")"),C=!0,t=new WebAssembly.RuntimeError(t+". Build with -sASSERTIONS for more info."),a(t),t}function $(){return Y.startsWith("data:application/octet-stream;base64,")}if(Y="ort-wasm.wasm",!$()){var q=Y;Y=e.locateFile?e.locateFile(q,m):m+q}function X(){var t=Y;try{if(t==Y&&g)return new Uint8Array(g);if(u)return u(t);throw"both async and sync fetching of the wasm failed"}catch(t){V(t)}}function J(t){this.name="ExitStatus",this.message="Program terminated with exit("+t+")",this.status=t}function Z(t){for(;0>2>>>0]=t},this.Eb=function(){return M[this.zb+4>>2>>>0]},this.Sb=function(t){M[this.zb+8>>2>>>0]=t},this.Wb=function(){return M[this.zb+8>>2>>>0]},this.Tb=function(){E[this.zb>>2>>>0]=0},this.Ib=function(t){S[this.zb+12>>0>>>0]=t?1:0},this.Pb=function(){return 0!=S[this.zb+12>>0>>>0]},this.Jb=function(t){S[this.zb+13>>0>>>0]=t?1:0},this.Lb=function(){return 0!=S[this.zb+13>>0>>>0]},this.Rb=function(t,e){this.Fb(0),this.Ub(t),this.Sb(e),this.Tb(),this.Ib(!1),this.Jb(!1)},this.Nb=function(){E[this.zb>>2>>>0]+=1},this.Xb=function(){var t=E[this.zb>>2>>>0];return E[this.zb>>2>>>0]=t-1,1===t},this.Fb=function(t){M[this.zb+16>>2>>>0]=t},this.Ob=function(){return M[this.zb+16>>2>>>0]},this.Qb=function(){if(Mt(this.Eb()))return M[this.Db>>2>>>0];var t=this.Ob();return 0!==t?t:this.Db}}function nt(t){return vt(new et(t).zb)}var rt=[];function at(t){var e=rt[t];return e||(t>=rt.length&&(rt.length=t+1),rt[t]=e=U.get(t)),e}function it(t){var e=D(t)+1,n=gt(e);return n&&k(t,S,n,e),n}var ot={};function ut(){if(!ct){var t,e={USER:"web_user",LOGNAME:"web_user",PATH:"/",PWD:"/",HOME:"/home/web_user",LANG:("object"==typeof navigator&&navigator.languages&&navigator.languages[0]||"C").replace("-","_")+".UTF-8",_:p||"./this.program"};for(t in ot)void 0===ot[t]?delete e[t]:e[t]=ot[t];var n=[];for(t in e)n.push(t+"="+e[t]);ct=n}return ct}var ct,st=[null,[],[]];function lt(t,e){var n=st[t];0===e||10===e?((1===t?v:w)(R(n,0)),n.length=0):n.push(e)}var ft=0;function 
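/* Editor's note (illustrative sketch): the helpers defined above marshal
   strings across the JS/wasm boundary - measure the UTF-8 byte length, copy a
   NUL-terminated UTF-8 string into the heap, and decode a heap pointer back
   into a JS string. A simplified equivalent using TextEncoder/TextDecoder
   (assumes the caller allocated lengthBytes + 1 for the terminator):

     const utf8Encoder = new TextEncoder();
     const utf8Decoder = new TextDecoder("utf8");
     function writeCString(heapU8, str, ptr) {
       const bytes = utf8Encoder.encode(str);
       heapU8.set(bytes, ptr);
       heapU8[ptr + bytes.length] = 0;      // NUL terminator
       return bytes.length;
     }
     function readCString(heapU8, ptr) {
       let end = ptr;
       while (heapU8[end] !== 0) end++;
       return utf8Decoder.decode(heapU8.subarray(ptr, end));
     }
*/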
pt(t){return 0==t%4&&(0!=t%100||0==t%400)}var ht=[31,29,31,30,31,30,31,31,30,31,30,31],dt=[31,28,31,30,31,30,31,31,30,31,30,31];function yt(t,e,n,r){function a(t,e,n){for(t="number"==typeof t?t.toString():t||"";t.lengtht?-1:0r-t.getDate())){t.setDate(t.getDate()+e);break}e-=r-t.getDate()+1,t.setDate(1),11>n?t.setMonth(n+1):(t.setMonth(0),t.setFullYear(t.getFullYear()+1))}return n=new Date(t.getFullYear()+1,0,4),e=u(new Date(t.getFullYear(),0,4)),n=u(n),0>=o(e,t)?0>=o(n,t)?t.getFullYear()+1:t.getFullYear():t.getFullYear()-1}var s=E[r+40>>2>>>0];for(var l in r={$b:E[r>>2>>>0],Zb:E[r+4>>2>>>0],Gb:E[r+8>>2>>>0],Kb:E[r+12>>2>>>0],Hb:E[r+16>>2>>>0],Cb:E[r+20>>2>>>0],Ab:E[r+24>>2>>>0],Bb:E[r+28>>2>>>0],bc:E[r+32>>2>>>0],Yb:E[r+36>>2>>>0],ac:s?j(s):""},n=j(n),s={"%c":"%a %b %d %H:%M:%S %Y","%D":"%m/%d/%y","%F":"%Y-%m-%d","%h":"%b","%r":"%I:%M:%S %p","%R":"%H:%M","%T":"%H:%M:%S","%x":"%m/%d/%y","%X":"%H:%M:%S","%Ec":"%c","%EC":"%C","%Ex":"%m/%d/%y","%EX":"%H:%M:%S","%Ey":"%y","%EY":"%Y","%Od":"%d","%Oe":"%e","%OH":"%H","%OI":"%I","%Om":"%m","%OM":"%M","%OS":"%S","%Ou":"%u","%OU":"%U","%OV":"%V","%Ow":"%w","%OW":"%W","%Oy":"%y"})n=n.replace(new RegExp(l,"g"),s[l]);var f="Sunday Monday Tuesday Wednesday Thursday Friday Saturday".split(" "),p="January February March April May June July August September October November December".split(" ");for(l in s={"%a":function(t){return f[t.Ab].substring(0,3)},"%A":function(t){return f[t.Ab]},"%b":function(t){return p[t.Hb].substring(0,3)},"%B":function(t){return p[t.Hb]},"%C":function(t){return i((t.Cb+1900)/100|0,2)},"%d":function(t){return i(t.Kb,2)},"%e":function(t){return a(t.Kb,2," ")},"%g":function(t){return c(t).toString().substring(2)},"%G":function(t){return c(t)},"%H":function(t){return i(t.Gb,2)},"%I":function(t){return 0==(t=t.Gb)?t=12:12t.Gb?"AM":"PM"},"%S":function(t){return i(t.$b,2)},"%t":function(){return"\\t"},"%u":function(t){return t.Ab||7},"%U":function(t){return i(Math.floor((t.Bb+7-t.Ab)/7),2)},"%V":function(t){var e=Math.floor((t.Bb+7-(t.Ab+6)%7)/7);if(2>=(t.Ab+371-t.Bb-2)%7&&e++,e)53==e&&(4==(n=(t.Ab+371-t.Bb)%7)||3==n&&pt(t.Cb)||(e=1));else{e=52;var n=(t.Ab+7-t.Bb-1)%7;(4==n||5==n&&pt(t.Cb%400-1))&&e++}return i(e,2)},"%w":function(t){return t.Ab},"%W":function(t){return i(Math.floor((t.Bb+7-(t.Ab+6)%7)/7),2)},"%y":function(t){return(t.Cb+1900).toString().substring(2)},"%Y":function(t){return t.Cb+1900},"%z":function(t){var e=0<=(t=t.Yb);return t=Math.abs(t)/60,(e?"+":"-")+String("0000"+(t/60*100+t%60)).slice(-4)},"%Z":function(t){return t.ac},"%%":function(){return"%"}},n=n.replace(/%%/g,"\\0\\0"),s)n.includes(l)&&(n=n.replace(new RegExp(l,"g"),s[l](r)));return l=function(t){var e=Array(D(t)+1);return k(t,e,0,e.length),e}(n=n.replace(/\\0\\0/g,"%")),l.length>e?0:(S.set(l,t>>>0),l.length-1)}var bt={a:function(t){return gt(t+24)+24},m:function(t){return(t=new et(t)).Pb()||(t.Ib(!0),K--),t.Jb(!1),Q.push(t),t.Nb(),t.Qb()},ia:function(t){throw w("Unexpected exception thrown, this is not properly supported - aborting"),C=!0,t},w:function(){Ot(0);var t=Q.pop();if(t.Xb()&&!t.Lb()){var e=t.Wb();e&&at(e)(t.Db),nt(t.Db)}tt=0},d:function(){var t=tt;if(!t)return ft=0;var e=new et(t);e.Fb(t);var n=e.Eb();if(!n)return ft=0,t;for(var 
r=Array.prototype.slice.call(arguments),a=0;a>>2]+4294967296*E[t+4>>>2])),E[e>>2>>>0]=t.getUTCSeconds(),E[e+4>>2>>>0]=t.getUTCMinutes(),E[e+8>>2>>>0]=t.getUTCHours(),E[e+12>>2>>>0]=t.getUTCDate(),E[e+16>>2>>>0]=t.getUTCMonth(),E[e+20>>2>>>0]=t.getUTCFullYear()-1900,E[e+24>>2>>>0]=t.getUTCDay(),E[e+28>>2>>>0]=(t.getTime()-Date.UTC(t.getUTCFullYear(),0,1,0,0,0,0))/864e5|0},Ea:function(t,e){t=new Date(1e3*(M[t>>>2]+4294967296*E[t+4>>>2])),E[e>>2>>>0]=t.getSeconds(),E[e+4>>2>>>0]=t.getMinutes(),E[e+8>>2>>>0]=t.getHours(),E[e+12>>2>>>0]=t.getDate(),E[e+16>>2>>>0]=t.getMonth(),E[e+20>>2>>>0]=t.getFullYear()-1900,E[e+24>>2>>>0]=t.getDay();var n=new Date(t.getFullYear(),0,1);E[e+28>>2>>>0]=(t.getTime()-n.getTime())/864e5|0,E[e+36>>2>>>0]=-60*t.getTimezoneOffset();var r=new Date(t.getFullYear(),6,1).getTimezoneOffset();n=n.getTimezoneOffset(),E[e+32>>2>>>0]=0|(r!=n&&t.getTimezoneOffset()==Math.min(n,r))},Fa:function(t){var e=new Date(E[t+20>>2>>>0]+1900,E[t+16>>2>>>0],E[t+12>>2>>>0],E[t+8>>2>>>0],E[t+4>>2>>>0],E[t>>2>>>0],0),n=E[t+32>>2>>>0],r=e.getTimezoneOffset(),a=new Date(e.getFullYear(),0,1),i=new Date(e.getFullYear(),6,1).getTimezoneOffset(),o=a.getTimezoneOffset(),u=Math.min(o,i);return 0>n?E[t+32>>2>>>0]=Number(i!=o&&u==r):0>2>>>0]=e.getDay(),E[t+28>>2>>>0]=(e.getTime()-a.getTime())/864e5|0,E[t>>2>>>0]=e.getSeconds(),E[t+4>>2>>>0]=e.getMinutes(),E[t+8>>2>>>0]=e.getHours(),E[t+12>>2>>>0]=e.getDate(),E[t+16>>2>>>0]=e.getMonth(),e.getTime()/1e3|0},sa:function(){return-52},ta:function(){},Ga:function t(e,n,r){t.Vb||(t.Vb=!0,function(t,e,n){function r(t){return(t=t.toTimeString().match(/\\(([A-Za-z ]+)\\)$/))?t[1]:"GMT"}var a=(new Date).getFullYear(),i=new Date(a,0,1),o=new Date(a,6,1);a=i.getTimezoneOffset();var u=o.getTimezoneOffset();E[t>>2>>>0]=60*Math.max(a,u),E[e>>2>>>0]=Number(a!=u),t=r(i),e=r(o),t=it(t),e=it(e),u>2>>>0]=t,M[n+4>>2>>>0]=e):(M[n>>2>>>0]=e,M[n+4>>2>>>0]=t)}(e,n,r))},B:function(){V("")},ma:function(){return 4294901760},I:b?()=>{var t=process.hrtime();return 1e3*t[0]+t[1]/1e6}:()=>performance.now(),xa:function(t,e,n){T.copyWithin(t>>>0,e>>>0,e+n>>>0)},G:function(t){var e=T.length;if(4294901760<(t>>>=0))return!1;for(var n=1;4>=n;n*=2){var r=e*(1+.2/n);r=Math.min(r,t+100663296);var a=Math;r=Math.max(t,r),a=a.min.call(a,4294901760,r+(65536-r%65536)%65536);t:{try{O.grow(a-A.byteLength+65535>>>16),P();var i=1;break t}catch(t){}i=void 0}if(i)return!0}return!1},va:function(t,e){var n=0;return ut().forEach((function(r,a){var i=e+n;for(a=M[t+4*a>>2>>>0]=i,i=0;i>0>>>0]=r.charCodeAt(i);S[a>>0>>>0]=0,n+=r.length+1})),0},wa:function(t,e){var n=ut();M[t>>2>>>0]=n.length;var r=0;return n.forEach((function(t){r+=t.length+1})),M[e>>2>>>0]=r,0},ba:function(t){_||0>2>>>0],u=M[e+4>>2>>>0];e+=8;for(var c=0;c>>0]);a+=u}return M[r>>2>>>0]=a,0},c:function(){return ft},ja:function t(e,r){t.Mb||(t.Mb=function(){if("object"==typeof crypto&&"function"==typeof crypto.getRandomValues){var t=new Uint8Array(1);return()=>(crypto.getRandomValues(t),t[0])}if(b)try{var e=n(Object(function(){var t=new Error("Cannot find module \'crypto\'");throw t.code="MODULE_NOT_FOUND",t}()));return()=>e.randomBytes(1)[0]}catch(t){}return()=>V("randomDevice")}());for(var a=0;a>0>>>0]=t.Mb();return 0},ea:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},fa:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},J:function(t){var e=At();try{return at(t)()}catch(t){if(St(e),t!==t+0)throw t;Ot(1,0)}},e:function(t,e){var n=At();try{return 
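/* Editor's note (sketch): the single-letter entries of this import object are
   emscripten invoke_* thunks. They let C++ exceptions and longjmp unwind
   through JS frames: save the wasm stack pointer, call through the function
   table, and on a throw restore the stack and record it via _setThrew. The
   shared shape, with assumed names standing in for the minified helpers here:

     function makeInvoke(module) {
       return function invoke(tableIndex, ...args) {
         const sp = module.stackSave();
         try {
           return module.wasmTable.get(tableIndex)(...args);
         } catch (e) {
           module.stackRestore(sp);
           if (e !== e + 0) throw e;   // a real JS error, not a numeric unwind tag
           module._setThrew(1, 0);
         }
       };
     }
*/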
at(t)(e)}catch(t){if(St(n),t!==t+0)throw t;Ot(1,0)}},N:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},O:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},j:function(t,e,n){var r=At();try{return at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},o:function(t,e,n,r){var a=At();try{return at(t)(e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},p:function(t,e,n,r,a){var i=At();try{return at(t)(e,n,r,a)}catch(t){if(St(i),t!==t+0)throw t;Ot(1,0)}},M:function(t,e,n,r,a,i){var o=At();try{return at(t)(e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},r:function(t,e,n,r,a,i){var o=At();try{return at(t)(e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},v:function(t,e,n,r,a,i,o){var u=At();try{return at(t)(e,n,r,a,i,o)}catch(t){if(St(u),t!==t+0)throw t;Ot(1,0)}},K:function(t,e,n,r,a,i,o,u){var c=At();try{return at(t)(e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},D:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=At();try{return at(t)(e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(St(p),t!==t+0)throw t;Ot(1,0)}},X:function(t,e,n,r,a,i,o,u){var c=At();try{return Ft(t,e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},V:function(t,e,n,r,a,i,o){var u=At();try{return xt(t,e,n,r,a,i,o)}catch(t){if(St(u),t!==t+0)throw t;Ot(1,0)}},U:function(t,e,n,r,a){var i=At();try{return It(t,e,n,r,a)}catch(t){if(St(i),t!==t+0)throw t;Ot(1,0)}},Z:function(t,e,n,r){var a=At();try{return Pt(t,e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},W:function(t){var e=At();try{return Ct(t)}catch(t){if(St(e),t!==t+0)throw t;Ot(1,0)}},Y:function(t,e){var n=At();try{return Ut(t,e)}catch(t){if(St(n),t!==t+0)throw t;Ot(1,0)}},T:function(t,e,n){var r=At();try{return Rt(t,e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},f:function(t){var e=At();try{at(t)()}catch(t){if(St(e),t!==t+0)throw t;Ot(1,0)}},q:function(t,e){var n=At();try{at(t)(e)}catch(t){if(St(n),t!==t+0)throw t;Ot(1,0)}},h:function(t,e,n){var r=At();try{at(t)(e,n)}catch(t){if(St(r),t!==t+0)throw t;Ot(1,0)}},da:function(t,e,n,r){var a=At();try{at(t)(e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},l:function(t,e,n,r){var a=At();try{at(t)(e,n,r)}catch(t){if(St(a),t!==t+0)throw t;Ot(1,0)}},t:function(t,e,n,r,a){var i=At();try{at(t)(e,n,r,a)}catch(t){if(St(i),t!==t+0)throw t;Ot(1,0)}},u:function(t,e,n,r,a,i){var o=At();try{at(t)(e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},x:function(t,e,n,r,a,i,o){var u=At();try{at(t)(e,n,r,a,i,o)}catch(t){if(St(u),t!==t+0)throw t;Ot(1,0)}},z:function(t,e,n,r,a,i,o,u){var c=At();try{at(t)(e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},ga:function(t,e,n,r,a,i,o,u,c){var s=At();try{at(t)(e,n,r,a,i,o,u,c)}catch(t){if(St(s),t!==t+0)throw t;Ot(1,0)}},A:function(t,e,n,r,a,i,o,u,c,s,l){var f=At();try{at(t)(e,n,r,a,i,o,u,c,s,l)}catch(t){if(St(f),t!==t+0)throw t;Ot(1,0)}},C:function(t,e,n,r,a,i,o,u,c,s,l,f,p,h,d,y){var b=At();try{at(t)(e,n,r,a,i,o,u,c,s,l,f,p,h,d,y)}catch(t){if(St(b),t!==t+0)throw t;Ot(1,0)}},aa:function(t,e,n,r,a,i,o,u){var c=At();try{jt(t,e,n,r,a,i,o,u)}catch(t){if(St(c),t!==t+0)throw t;Ot(1,0)}},_:function(t,e,n,r,a,i,o,u,c,s,l,f){var p=At();try{Dt(t,e,n,r,a,i,o,u,c,s,l,f)}catch(t){if(St(p),t!==t+0)throw t;Ot(1,0)}},$:function(t,e,n,r,a,i){var o=At();try{kt(t,e,n,r,a,i)}catch(t){if(St(o),t!==t+0)throw t;Ot(1,0)}},n:function(t){return t},F:function(t){ft=t},ha:yt,y:function(t,e,n,r){return yt(t,e,n,r)}};!function(){function 
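/* Editor's note (sketch): the IIFE that resumes on the next line instantiates
   ort-wasm.wasm. It prefers WebAssembly.instantiateStreaming over fetch() and
   falls back to downloading an ArrayBuffer and calling WebAssembly.instantiate
   when streaming is unavailable, the URL is file:// or a data: URI, or the
   binary was supplied inline. A minimal version of that strategy:

     async function loadWasm(url, imports) {
       if (typeof WebAssembly.instantiateStreaming === "function") {
         try {
           return await WebAssembly.instantiateStreaming(
             fetch(url, { credentials: "same-origin" }), imports);
         } catch (e) {
           console.warn("wasm streaming compile failed: " + e);
         }
       }
       const resp = await fetch(url, { credentials: "same-origin" });
       return WebAssembly.instantiate(await resp.arrayBuffer(), imports);
     }
*/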
t(t){e.asm=t.exports,O=e.asm.Ka,P(),U=e.asm.ib,I.unshift(e.asm.La),B--,e.monitorRunDependencies&&e.monitorRunDependencies(B),0==B&&(null!==G&&(clearInterval(G),G=null),N&&(t=N,N=null,t()))}function n(e){t(e.instance)}function r(t){return function(){if(!g&&(d||y)){if("function"==typeof fetch&&!Y.startsWith("file://"))return fetch(Y,{credentials:"same-origin"}).then((function(t){if(!t.ok)throw"failed to load wasm binary file at \'"+Y+"\'";return t.arrayBuffer()})).catch((function(){return X()}));if(o)return new Promise((function(t,e){o(Y,(function(e){t(new Uint8Array(e))}),e)}))}return Promise.resolve().then((function(){return X()}))}().then((function(t){return WebAssembly.instantiate(t,i)})).then((function(t){return t})).then(t,(function(t){w("failed to asynchronously prepare wasm: "+t),V(t)}))}var i={a:bt};if(B++,e.monitorRunDependencies&&e.monitorRunDependencies(B),e.instantiateWasm)try{return e.instantiateWasm(i,t)}catch(t){return w("Module.instantiateWasm callback failed with error: "+t),!1}(g||"function"!=typeof WebAssembly.instantiateStreaming||$()||Y.startsWith("file://")||b||"function"!=typeof fetch?r(n):fetch(Y,{credentials:"same-origin"}).then((function(t){return WebAssembly.instantiateStreaming(t,i).then(n,(function(t){return w("wasm streaming compile failed: "+t),w("falling back to ArrayBuffer instantiation"),r(n)}))}))).catch(a)}(),e.___wasm_call_ctors=function(){return(e.___wasm_call_ctors=e.asm.La).apply(null,arguments)},e._OrtInit=function(){return(e._OrtInit=e.asm.Ma).apply(null,arguments)},e._OrtCreateSessionOptions=function(){return(e._OrtCreateSessionOptions=e.asm.Na).apply(null,arguments)},e._OrtAppendExecutionProvider=function(){return(e._OrtAppendExecutionProvider=e.asm.Oa).apply(null,arguments)},e._OrtAddSessionConfigEntry=function(){return(e._OrtAddSessionConfigEntry=e.asm.Pa).apply(null,arguments)},e._OrtReleaseSessionOptions=function(){return(e._OrtReleaseSessionOptions=e.asm.Qa).apply(null,arguments)},e._OrtCreateSession=function(){return(e._OrtCreateSession=e.asm.Ra).apply(null,arguments)},e._OrtReleaseSession=function(){return(e._OrtReleaseSession=e.asm.Sa).apply(null,arguments)},e._OrtGetInputCount=function(){return(e._OrtGetInputCount=e.asm.Ta).apply(null,arguments)},e._OrtGetOutputCount=function(){return(e._OrtGetOutputCount=e.asm.Ua).apply(null,arguments)},e._OrtGetInputName=function(){return(e._OrtGetInputName=e.asm.Va).apply(null,arguments)},e._OrtGetOutputName=function(){return(e._OrtGetOutputName=e.asm.Wa).apply(null,arguments)},e._OrtFree=function(){return(e._OrtFree=e.asm.Xa).apply(null,arguments)},e._OrtCreateTensor=function(){return(e._OrtCreateTensor=e.asm.Ya).apply(null,arguments)},e._OrtGetTensorData=function(){return(e._OrtGetTensorData=e.asm.Za).apply(null,arguments)},e._OrtReleaseTensor=function(){return(e._OrtReleaseTensor=e.asm._a).apply(null,arguments)},e._OrtCreateRunOptions=function(){return(e._OrtCreateRunOptions=e.asm.$a).apply(null,arguments)},e._OrtAddRunConfigEntry=function(){return(e._OrtAddRunConfigEntry=e.asm.ab).apply(null,arguments)},e._OrtReleaseRunOptions=function(){return(e._OrtReleaseRunOptions=e.asm.bb).apply(null,arguments)},e._OrtRun=function(){return(e._OrtRun=e.asm.cb).apply(null,arguments)},e._OrtEndProfiling=function(){return(e._OrtEndProfiling=e.asm.db).apply(null,arguments)};var 
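/* Editor's note (sketch, outline only): the e._Ort* wrappers bound just above
   expose the onnxruntime-web C API to the session/run helpers in module 349
   further down. Assuming an initialized module handle `ort`, a model already
   copied into the wasm heap at modelPtr, and placeholder option values, those
   helpers drive the API in roughly this order (cleanup and error paths elided;
   see module 349 for the exact calls this bundle makes):

     const graphOptLevel = 99, execMode = 0, logSeverity = 2, logVerbosity = 0;
     ort._OrtInit(numThreads, loggingLevel);
     const sessionOptions = ort._OrtCreateSessionOptions(
       graphOptLevel, true, true, execMode, false, 0, 0, logSeverity, logVerbosity);
     const session = ort._OrtCreateSession(modelPtr, modelByteLength, sessionOptions);
     const inputCount  = ort._OrtGetInputCount(session);
     const outputCount = ort._OrtGetOutputCount(session);
     // ... _OrtCreateTensor / _OrtRun / _OrtReleaseTensor per inference ...
     ort._OrtReleaseSession(session);
     ort._OrtReleaseSessionOptions(sessionOptions);
*/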
mt,gt=e._malloc=function(){return(gt=e._malloc=e.asm.eb).apply(null,arguments)},vt=e._free=function(){return(vt=e._free=e.asm.fb).apply(null,arguments)},wt=e._fflush=function(){return(wt=e._fflush=e.asm.gb).apply(null,arguments)},_t=e.___funcs_on_exit=function(){return(_t=e.___funcs_on_exit=e.asm.hb).apply(null,arguments)},Ot=e._setThrew=function(){return(Ot=e._setThrew=e.asm.jb).apply(null,arguments)},At=e.stackSave=function(){return(At=e.stackSave=e.asm.kb).apply(null,arguments)},St=e.stackRestore=function(){return(St=e.stackRestore=e.asm.lb).apply(null,arguments)},Tt=e.stackAlloc=function(){return(Tt=e.stackAlloc=e.asm.mb).apply(null,arguments)},Et=e.___cxa_can_catch=function(){return(Et=e.___cxa_can_catch=e.asm.nb).apply(null,arguments)},Mt=e.___cxa_is_pointer_type=function(){return(Mt=e.___cxa_is_pointer_type=e.asm.ob).apply(null,arguments)},Ct=e.dynCall_j=function(){return(Ct=e.dynCall_j=e.asm.pb).apply(null,arguments)},xt=e.dynCall_iiiiij=function(){return(xt=e.dynCall_iiiiij=e.asm.qb).apply(null,arguments)},Rt=e.dynCall_jii=function(){return(Rt=e.dynCall_jii=e.asm.rb).apply(null,arguments)},jt=e.dynCall_viiiiij=function(){return(jt=e.dynCall_viiiiij=e.asm.sb).apply(null,arguments)},kt=e.dynCall_vjji=function(){return(kt=e.dynCall_vjji=e.asm.tb).apply(null,arguments)},Dt=e.dynCall_viiijjjii=function(){return(Dt=e.dynCall_viiijjjii=e.asm.ub).apply(null,arguments)},Pt=e.dynCall_iij=function(){return(Pt=e.dynCall_iij=e.asm.vb).apply(null,arguments)},Ut=e.dynCall_ji=function(){return(Ut=e.dynCall_ji=e.asm.wb).apply(null,arguments)},Ft=e.dynCall_iiiiiij=function(){return(Ft=e.dynCall_iiiiiij=e.asm.xb).apply(null,arguments)},It=e.dynCall_iiij=function(){return(It=e.dynCall_iiij=e.asm.yb).apply(null,arguments)};function Wt(){function t(){if(!mt&&(mt=!0,e.calledRun=!0,!C)){if(Z(I),r(e),e.onRuntimeInitialized&&e.onRuntimeInitialized(),e.postRun)for("function"==typeof e.postRun&&(e.postRun=[e.postRun]);e.postRun.length;){var t=e.postRun.shift();H.unshift(t)}Z(H)}}if(!(0{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.iterateExtraOptions=void 0,e.iterateExtraOptions=(t,n,r,a)=>{if("object"==typeof t&&null!==t){if(r.has(t))throw new Error("Circular reference in options");r.add(t)}Object.entries(t).forEach((([t,i])=>{const o=n?n+t:t;if("object"==typeof i)(0,e.iterateExtraOptions)(i,o+".",r,a);else if("string"==typeof i||"number"==typeof i)a(o,i.toString());else{if("boolean"!=typeof i)throw new Error("Can\'t handle extra config type: "+typeof i);a(o,i?"1":"0")}}))}},586:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.setRunOptions=void 0;const r=n(967),a=n(983),i=n(361);e.setRunOptions=t=>{const e=(0,i.getInstance)();let n=0;const o=[],u=t||{};try{if(void 0===(null==t?void 0:t.logSeverityLevel))u.logSeverityLevel=2;else if("number"!=typeof t.logSeverityLevel||!Number.isInteger(t.logSeverityLevel)||t.logSeverityLevel<0||t.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${t.logSeverityLevel}`);if(void 0===(null==t?void 0:t.logVerbosityLevel))u.logVerbosityLevel=0;else if("number"!=typeof t.logVerbosityLevel||!Number.isInteger(t.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${t.logVerbosityLevel}`);void 0===(null==t?void 0:t.terminate)&&(u.terminate=!1);let i=0;if(void 0!==(null==t?void 0:t.tag)&&(i=(0,a.allocWasmString)(t.tag,o)),n=e._OrtCreateRunOptions(u.logSeverityLevel,u.logVerbosityLevel,!!u.terminate,i),0===n)throw new Error("Can\'t create run options");return void 0!==(null==t?void 
0:t.extra)&&(0,r.iterateExtraOptions)(t.extra,"",new WeakSet,((t,r)=>{const i=(0,a.allocWasmString)(t,o),u=(0,a.allocWasmString)(r,o);if(0!==e._OrtAddRunConfigEntry(n,i,u))throw new Error(`Can\'t set a run config entry: ${t} - ${r}`)})),[n,o]}catch(t){throw 0!==n&&e._OrtReleaseRunOptions(n),o.forEach(e._free),t}}},919:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.setSessionOptions=void 0;const r=n(967),a=n(983),i=n(361);e.setSessionOptions=t=>{const e=(0,i.getInstance)();let n=0;const o=[],u=t||{};(t=>{t.extra||(t.extra={}),t.extra.session||(t.extra.session={});const e=t.extra.session;e.use_ort_model_bytes_directly||(e.use_ort_model_bytes_directly="1")})(u);try{void 0===(null==t?void 0:t.graphOptimizationLevel)&&(u.graphOptimizationLevel="all");const c=(t=>{switch(t){case"disabled":return 0;case"basic":return 1;case"extended":return 2;case"all":return 99;default:throw new Error(`unsupported graph optimization level: ${t}`)}})(u.graphOptimizationLevel);void 0===(null==t?void 0:t.enableCpuMemArena)&&(u.enableCpuMemArena=!0),void 0===(null==t?void 0:t.enableMemPattern)&&(u.enableMemPattern=!0),void 0===(null==t?void 0:t.executionMode)&&(u.executionMode="sequential");const s=(t=>{switch(t){case"sequential":return 0;case"parallel":return 1;default:throw new Error(`unsupported execution mode: ${t}`)}})(u.executionMode);let l=0;if(void 0!==(null==t?void 0:t.logId)&&(l=(0,a.allocWasmString)(t.logId,o)),void 0===(null==t?void 0:t.logSeverityLevel))u.logSeverityLevel=2;else if("number"!=typeof t.logSeverityLevel||!Number.isInteger(t.logSeverityLevel)||t.logSeverityLevel<0||t.logSeverityLevel>4)throw new Error(`log serverity level is not valid: ${t.logSeverityLevel}`);if(void 0===(null==t?void 0:t.logVerbosityLevel))u.logVerbosityLevel=0;else if("number"!=typeof t.logVerbosityLevel||!Number.isInteger(t.logVerbosityLevel))throw new Error(`log verbosity level is not valid: ${t.logVerbosityLevel}`);if(void 0===(null==t?void 0:t.enableProfiling)&&(u.enableProfiling=!1),n=e._OrtCreateSessionOptions(c,!!u.enableCpuMemArena,!!u.enableMemPattern,s,!!u.enableProfiling,0,l,u.logSeverityLevel,u.logVerbosityLevel),0===n)throw new Error("Can\'t create session options");return(null==t?void 0:t.executionProviders)&&((t,e,n)=>{for(const r of e){let e="string"==typeof r?r:r.name;switch(e){case"xnnpack":e="XNNPACK";break;case"wasm":case"cpu":continue;default:throw new Error(`not supported EP: ${e}`)}const o=(0,a.allocWasmString)(e,n);if(0!==(0,i.getInstance)()._OrtAppendExecutionProvider(t,o))throw new Error(`Can\'t append execution provider: ${e}`)}})(n,t.executionProviders,o),void 0!==(null==t?void 0:t.extra)&&(0,r.iterateExtraOptions)(t.extra,"",new WeakSet,((t,r)=>{const i=(0,a.allocWasmString)(t,o),u=(0,a.allocWasmString)(r,o);if(0!==e._OrtAddSessionConfigEntry(n,i,u))throw new Error(`Can\'t set a session config entry: ${t} - ${r}`)})),[n,o]}catch(t){throw 0!==n&&e._OrtReleaseSessionOptions(n),o.forEach(e._free),t}}},983:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.allocWasmString=void 0;const r=n(361);e.allocWasmString=(t,e)=>{const n=(0,r.getInstance)(),a=n.lengthBytesUTF8(t)+1,i=n._malloc(a);return n.stringToUTF8(t,i,a),e.push(i),i}},349:(t,e,n)=>{"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.extractTransferableBuffers=e.endProfiling=e.run=e.releaseSession=e.createSession=e.createSessionFinalize=e.createSessionAllocate=e.initOrt=void 0;const r=n(586),a=n(919),i=n(983),o=n(361);e.initOrt=(t,e)=>{const 
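/* Editor's note (sketch): setSessionOptions above converts the user-facing
   option strings into the numeric values _OrtCreateSessionOptions expects.
   Its two lookup tables are equivalent to:

     const graphOptimizationLevels = { disabled: 0, basic: 1, extended: 2, all: 99 };
     const executionModes = { sequential: 0, parallel: 1 };
     // any other string throws, e.g. "unsupported graph optimization level: ..."
*/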
n=(0,o.getInstance)()._OrtInit(t,e);if(0!==n)throw new Error(`Can\'t initialize onnxruntime. error code = ${n}`)};const u=new Map;e.createSessionAllocate=t=>{const e=(0,o.getInstance)(),n=e._malloc(t.byteLength);return e.HEAPU8.set(t,n),[n,t.byteLength]},e.createSessionFinalize=(t,e)=>{const n=(0,o.getInstance)();let r=0,i=0,c=[];try{if([i,c]=(0,a.setSessionOptions)(e),r=n._OrtCreateSession(t[0],t[1],i),0===r)throw new Error("Can\'t create a session")}finally{n._free(t[0]),n._OrtReleaseSessionOptions(i),c.forEach(n._free)}const s=n._OrtGetInputCount(r),l=n._OrtGetOutputCount(r),f=[],p=[],h=[],d=[];for(let t=0;t{const r=(0,e.createSessionAllocate)(t);return(0,e.createSessionFinalize)(r,n)},e.releaseSession=t=>{const e=(0,o.getInstance)(),n=u.get(t);if(!n)throw new Error("invalid session id");const r=n[0],a=n[1],i=n[2];a.forEach(e._OrtFree),i.forEach(e._OrtFree),e._OrtReleaseSession(r),u.delete(t)};const c=t=>{switch(t){case"int8":return 3;case"uint8":return 2;case"bool":return 9;case"int16":return 5;case"uint16":return 4;case"int32":return 6;case"uint32":return 12;case"float32":return 1;case"float64":return 11;case"string":return 8;case"int64":return 7;case"uint64":return 13;default:throw new Error(`unsupported data type: ${t}`)}},s=t=>{switch(t){case 3:return"int8";case 2:return"uint8";case 9:return"bool";case 5:return"int16";case 4:return"uint16";case 6:return"int32";case 12:return"uint32";case 1:return"float32";case 11:return"float64";case 8:return"string";case 7:return"int64";case 13:return"uint64";default:throw new Error(`unsupported data type: ${t}`)}},l=t=>{switch(t){case"float32":return Float32Array;case"uint8":case"bool":return Uint8Array;case"int8":return Int8Array;case"uint16":return Uint16Array;case"int16":return Int16Array;case"int32":return Int32Array;case"float64":return Float64Array;case"uint32":return Uint32Array;case"int64":return BigInt64Array;case"uint64":return BigUint64Array;default:throw new Error(`unsupported type: ${t}`)}};e.run=(t,e,n,a,f)=>{const p=(0,o.getInstance)(),h=u.get(t);if(!h)throw new Error("invalid session id");const d=h[0],y=h[1],b=h[2],m=e.length,g=a.length;let v=0,w=[];const _=[],O=[];try{[v,w]=(0,r.setRunOptions)(f);for(let t=0;tp.HEAP32[t++]=e));const n=p._OrtCreateTensor(c(e),o,u,l,r.length);if(0===n)throw new Error("Can\'t create a tensor");_.push(n)}finally{p.stackRestore(s)}}const t=p.stackSave(),o=p.stackAlloc(4*m),u=p.stackAlloc(4*m),h=p.stackAlloc(4*g),A=p.stackAlloc(4*g);try{let n=o/4,r=u/4,i=h/4,c=A/4;for(let t=0;tt*e));if(a=s(o),"string"===a){const t=[];let e=i/4;for(let n=0;n{const e=(0,o.getInstance)(),n=u.get(t);if(!n)throw new Error("invalid session id");const r=n[0],a=e._OrtEndProfiling(r);if(0===a)throw new Error("Can\'t get an profile file name");e._OrtFree(a)},e.extractTransferableBuffers=t=>{const e=[];for(const n of t){const t=n[2];!Array.isArray(t)&&t.buffer&&e.push(t.buffer)}return e}},361:function(t,e,n){"use strict";var r=this&&this.__createBinding||(Object.create?function(t,e,n,r){void 0===r&&(r=n);var a=Object.getOwnPropertyDescriptor(e,n);a&&!("get"in a?!e.__esModule:a.writable||a.configurable)||(a={enumerable:!0,get:function(){return e[n]}}),Object.defineProperty(t,r,a)}:function(t,e,n,r){void 0===r&&(r=n),t[r]=e[n]}),a=this&&this.__setModuleDefault||(Object.create?function(t,e){Object.defineProperty(t,"default",{enumerable:!0,value:e})}:function(t,e){t.default=e}),i=this&&this.__importStar||function(t){if(t&&t.__esModule)return t;var e={};if(null!=t)for(var n in 
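/* Editor's note (sketch): the createSession/run helpers above translate tensor
   data-type strings to the ONNX tensor element type enum and back. The pairs
   used in this bundle are:

     const dataTypeToEnum = {
       float32: 1, uint8: 2, int8: 3, uint16: 4, int16: 5, int32: 6, int64: 7,
       string: 8, bool: 9, float64: 11, uint32: 12, uint64: 13,
     };
     const enumToDataType = Object.fromEntries(
       Object.entries(dataTypeToEnum).map(([name, id]) => [id, name]));
*/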
t)"default"!==n&&Object.prototype.hasOwnProperty.call(t,n)&&r(e,t,n);return a(e,t),e},o=this&&this.__importDefault||function(t){return t&&t.__esModule?t:{default:t}};Object.defineProperty(e,"__esModule",{value:!0}),e.dispose=e.getInstance=e.initializeWebAssembly=void 0;const u=i(n(449)),c=o(n(932)),s=n(474);let l,f=!1,p=!1,h=!1;const d=(t,e)=>e?t?"ort-wasm-simd-threaded.wasm":"ort-wasm-threaded.wasm":t?"ort-wasm-simd.wasm":"ort-wasm.wasm";e.initializeWebAssembly=async t=>{if(f)return Promise.resolve();if(p)throw new Error("multiple calls to \'initializeWebAssembly()\' detected.");if(h)throw new Error("previous call to \'initializeWebAssembly()\' failed.");p=!0;const e=t.initTimeout,r=t.numThreads,a=t.simd,i=r>1&&(()=>{try{return"undefined"!=typeof SharedArrayBuffer&&("undefined"!=typeof MessageChannel&&(new MessageChannel).port1.postMessage(new SharedArrayBuffer(1)),WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,5,4,1,3,1,1,10,11,1,9,0,65,0,254,16,2,0,26,11])))}catch(t){return!1}})(),o=a&&(()=>{try{return WebAssembly.validate(new Uint8Array([0,97,115,109,1,0,0,0,1,4,1,96,0,0,3,2,1,0,10,30,1,28,0,65,0,253,15,253,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,253,186,1,26,11]))}catch(t){return!1}})(),y="string"==typeof t.wasmPaths?t.wasmPaths:void 0,b=d(!1,i),m=d(o,i),g="object"==typeof t.wasmPaths?t.wasmPaths[m]:void 0;let v=!1;const w=[];if(e>0&&w.push(new Promise((t=>{setTimeout((()=>{v=!0,t()}),e)}))),w.push(new Promise(((t,e)=>{const r=i?s:c.default,a={locateFile:(t,e)=>i&&t.endsWith(".worker.js")&&"undefined"!=typeof Blob?URL.createObjectURL(new Blob([n(154)],{type:"text/javascript"})):t===b?null!=g?g:(null!=y?y:e)+m:e+t};if(i)if("undefined"==typeof Blob)a.mainScriptUrlOrBlob=u.join("/","ort-wasm-threaded.js");else{const t=`var ortWasmThreaded=(function(){var _scriptDir;return ${r.toString()}})();`;a.mainScriptUrlOrBlob=new Blob([t],{type:"text/javascript"})}r(a).then((e=>{p=!1,f=!0,l=e,t()}),(t=>{p=!1,h=!0,e(t)}))}))),await Promise.race(w),v)throw new Error(`WebAssembly backend initializing failed due to timeout: ${e}ms`)},e.getInstance=()=>{if(f&&l)return l;throw new Error("WebAssembly is not initialized yet.")},e.dispose=()=>{var t;!f||p||h||(p=!0,null===(t=l.PThread)||void 0===t||t.terminateAllThreads(),l=void 0,p=!1,f=!1,h=!0)}},154:t=>{"use strict";t.exports=\'"use strict";var e={},t="object"==typeof process&&"object"==typeof process.versions&&"string"==typeof process.versions.node;if(t){var r=require("worker_threads"),a=r.parentPort;a.on("message",(e=>onmessage({data:e})));var o=require("fs");Object.assign(global,{self:global,require:require,Module:e,location:{href:__filename},Worker:r.Worker,importScripts:function(e){(0,eval)(o.readFileSync(e,"utf8"))},postMessage:function(e){a.postMessage(e)},performance:global.performance||{now:function(){return Date.now()}}})}var s=!1,n=[],i=function(){var e=Array.prototype.slice.call(arguments).join(" ");t?o.writeSync(2,e+"\\\\n"):console.error(e)};self.alert=function(){var t=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:t,threadId:e._pthread_self()})},e.instantiateWasm=(t,r)=>{var a=new WebAssembly.Instance(e.wasmModule,t);return r(a),e.wasmModule=null,a.exports},self.onunhandledrejection=e=>{throw e.reason??e},self.onmessage=t=>{try{if("load"===t.data.cmd){if(e.wasmModule=t.data.wasmModule,e.wasmMemory=t.data.wasmMemory,e.buffer=e.wasmMemory.buffer,e.ENVIRONMENT_IS_PTHREAD=!0,"string"==typeof t.data.urlOrBlob)importScripts(t.data.urlOrBlob);else{var 
r=URL.createObjectURL(t.data.urlOrBlob);importScripts(r),URL.revokeObjectURL(r)}ortWasmThreaded(e).then((function(t){e=t}))}else if("run"===t.data.cmd){e.__performance_now_clock_drift=performance.now()-t.data.time,e.__emscripten_thread_init(t.data.pthread_ptr,0,0,1),e.establishStackSpace(),e.PThread.receiveObjectTransfer(t.data),e.PThread.threadInitTLS(),s||(n.forEach((t=>{e.executeNotifiedProxyingQueue(t)})),n=[],s=!0);try{e.invokeEntryPoint(t.data.start_routine,t.data.arg)}catch(t){if("unwind"!=t){if(!(t instanceof e.ExitStatus))throw t;e.keepRuntimeAlive()||e.__emscripten_thread_exit(t.status)}}}else"cancel"===t.data.cmd?e._pthread_self()&&e.__emscripten_thread_exit(-1):"setimmediate"===t.data.target||("processProxyingQueue"===t.data.cmd?s?e.executeNotifiedProxyingQueue(t.data.queue):n.push(t.data.queue):(i("worker.js received unknown command "+t.data.cmd),i(t.data)))}catch(t){throw i("worker.js onmessage() captured an uncaught exception: "+t),t&&t.stack&&i(t.stack),e.__emscripten_thread_crashed&&e.__emscripten_thread_crashed(),t}};\\n\'},384:()=>{},993:()=>{},908:()=>{},953:()=>{},925:()=>{},449:()=>{}},e={};function n(r){var a=e[r];if(void 0!==a)return a.exports;var i=e[r]={exports:{}};return t[r].call(i.exports,i,i.exports,n),i.exports}n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(t){if("object"==typeof window)return window}}(),(()=>{"use strict";const t=n(349),e=n(361);self.onmessage=n=>{switch(n.data.type){case"init-wasm":(0,e.initializeWebAssembly)(n.data.in).then((()=>postMessage({type:"init-wasm"})),(t=>postMessage({type:"init-wasm",err:t})));break;case"init-ort":try{const{numThreads:e,loggingLevel:r}=n.data.in;(0,t.initOrt)(e,r),postMessage({type:"init-ort"})}catch(t){postMessage({type:"init-ort",err:t})}break;case"create_allocate":try{const{model:e}=n.data.in,r=(0,t.createSessionAllocate)(e);postMessage({type:"create_allocate",out:r})}catch(t){postMessage({type:"create_allocate",err:t})}break;case"create_finalize":try{const{modeldata:e,options:r}=n.data.in,a=(0,t.createSessionFinalize)(e,r);postMessage({type:"create_finalize",out:a})}catch(t){postMessage({type:"create_finalize",err:t})}break;case"create":try{const{model:e,options:r}=n.data.in,a=(0,t.createSession)(e,r);postMessage({type:"create",out:a})}catch(t){postMessage({type:"create",err:t})}break;case"release":try{const e=n.data.in;(0,t.releaseSession)(e),postMessage({type:"release"})}catch(t){postMessage({type:"release",err:t})}break;case"run":try{const{sessionId:e,inputIndices:r,inputs:a,outputIndices:i,options:o}=n.data.in,u=(0,t.run)(e,r,a,i,o);postMessage({type:"run",out:u},(0,t.extractTransferableBuffers)(u))}catch(t){postMessage({type:"run",err:t})}break;case"end-profiling":try{const e=n.data.in;(0,t.endProfiling)(e),postMessage({type:"end-profiling"})}catch(t){postMessage({type:"end-profiling",err:t})}}}})()})();\n', "Worker", void 0, void 0) } }, 477: b => { b.exports = function(n, a, u, c) { var f = self || window; try { try { var s; try { s = new f.Blob([n]) } catch { (s = new(f.BlobBuilder || f.WebKitBlobBuilder || f.MozBlobBuilder || f.MSBlobBuilder)).append(n), s = s.getBlob() } var h = f.URL || f.webkitURL, p = h.createObjectURL(s), l = new f[a](p, u); return h.revokeObjectURL(p), l } catch { return new f[a]("data:application/javascript,".concat(encodeURIComponent(n)), u) } } catch { if (!c) throw Error("Inline worker is not supported"); return new f[a](c, u) } } }, 4154: b => { b.exports = `"use strict";var e={},t="object"==typeof 
process&&"object"==typeof process.versions&&"string"==typeof process.versions.node;if(t){var r=require("worker_threads"),a=r.parentPort;a.on("message",(e=>onmessage({data:e})));var o=require("fs");Object.assign(global,{self:global,require:require,Module:e,location:{href:__filename},Worker:r.Worker,importScripts:function(e){(0,eval)(o.readFileSync(e,"utf8"))},postMessage:function(e){a.postMessage(e)},performance:global.performance||{now:function(){return Date.now()}}})}var s=!1,n=[],i=function(){var e=Array.prototype.slice.call(arguments).join(" ");t?o.writeSync(2,e+"\\n"):console.error(e)};self.alert=function(){var t=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:t,threadId:e._pthread_self()})},e.instantiateWasm=(t,r)=>{var a=new WebAssembly.Instance(e.wasmModule,t);return r(a),e.wasmModule=null,a.exports},self.onunhandledrejection=e=>{throw e.reason??e},self.onmessage=t=>{try{if("load"===t.data.cmd){if(e.wasmModule=t.data.wasmModule,e.wasmMemory=t.data.wasmMemory,e.buffer=e.wasmMemory.buffer,e.ENVIRONMENT_IS_PTHREAD=!0,"string"==typeof t.data.urlOrBlob)importScripts(t.data.urlOrBlob);else{var r=URL.createObjectURL(t.data.urlOrBlob);importScripts(r),URL.revokeObjectURL(r)}ortWasmThreaded(e).then((function(t){e=t}))}else if("run"===t.data.cmd){e.__performance_now_clock_drift=performance.now()-t.data.time,e.__emscripten_thread_init(t.data.pthread_ptr,0,0,1),e.establishStackSpace(),e.PThread.receiveObjectTransfer(t.data),e.PThread.threadInitTLS(),s||(n.forEach((t=>{e.executeNotifiedProxyingQueue(t)})),n=[],s=!0);try{e.invokeEntryPoint(t.data.start_routine,t.data.arg)}catch(t){if("unwind"!=t){if(!(t instanceof e.ExitStatus))throw t;e.keepRuntimeAlive()||e.__emscripten_thread_exit(t.status)}}}else"cancel"===t.data.cmd?e._pthread_self()&&e.__emscripten_thread_exit(-1):"setimmediate"===t.data.target||("processProxyingQueue"===t.data.cmd?s?e.executeNotifiedProxyingQueue(t.data.queue):n.push(t.data.queue):(i("worker.js received unknown command "+t.data.cmd),i(t.data)))}catch(t){throw i("worker.js onmessage() captured an uncaught exception: "+t),t&&t.stack&&i(t.stack),e.__emscripten_thread_crashed&&e.__emscripten_thread_crashed(),t}}; ` }, 1670: b => { b.exports = __WEBPACK_EXTERNAL_MODULE__1670__ }, 7067: () => {}, 1296: () => {}, 1384: () => {}, 3993: () => {}, 908: () => {}, 6953: () => {}, 9925: () => {}, 2806: () => {}, 6449: () => {}, 2850: () => {}, 5381: () => {}, 5686: (b, n, a) => { a.r(n), a.d(n, { flatbuffers: () => u }); var u = {}; u.Offset, u.Table, u.SIZEOF_SHORT = 2, u.SIZEOF_INT = 4, u.FILE_IDENTIFIER_LENGTH = 4, u.SIZE_PREFIX_LENGTH = 4, u.Encoding = { UTF8_BYTES: 1, UTF16_STRING: 2 }, u.int32 = new Int32Array(2), u.float32 = new Float32Array(u.int32.buffer), u.float64 = new Float64Array(u.int32.buffer), u.isLittleEndian = new Uint16Array(new Uint8Array([1, 0]).buffer)[0] === 1, u.Long = function(c, f) { this.low = 0 | c, this.high = 0 | f }, u.Long.create = function(c, f) { return c == 0 && f == 0 ? 
u.Long.ZERO : new u.Long(c, f) }, u.Long.prototype.toFloat64 = function() { return (this.low >>> 0) + 4294967296 * this.high }, u.Long.prototype.equals = function(c) { return this.low == c.low && this.high == c.high }, u.Long.ZERO = new u.Long(0, 0), u.Builder = function(c) { if (c) f = c; else var f = 1024; this.bb = u.ByteBuffer.allocate(f), this.space = f, this.minalign = 1, this.vtable = null, this.vtable_in_use = 0, this.isNested = !1, this.object_start = 0, this.vtables = [], this.vector_num_elems = 0, this.force_defaults = !1 }, u.Builder.prototype.clear = function() { this.bb.clear(), this.space = this.bb.capacity(), this.minalign = 1, this.vtable = null, this.vtable_in_use = 0, this.isNested = !1, this.object_start = 0, this.vtables = [], this.vector_num_elems = 0, this.force_defaults = !1 }, u.Builder.prototype.forceDefaults = function(c) { this.force_defaults = c }, u.Builder.prototype.dataBuffer = function() { return this.bb }, u.Builder.prototype.asUint8Array = function() { return this.bb.bytes().subarray(this.bb.position(), this.bb.position() + this.offset()) }, u.Builder.prototype.prep = function(c, f) { c > this.minalign && (this.minalign = c); for (var s = 1 + ~(this.bb.capacity() - this.space + f) & c - 1; this.space < s + c + f;) { var h = this.bb.capacity(); this.bb = u.Builder.growByteBuffer(this.bb), this.space += this.bb.capacity() - h } this.pad(s) }, u.Builder.prototype.pad = function(c) { for (var f = 0; f < c; f++) this.bb.writeInt8(--this.space, 0) }, u.Builder.prototype.writeInt8 = function(c) { this.bb.writeInt8(this.space -= 1, c) }, u.Builder.prototype.writeInt16 = function(c) { this.bb.writeInt16(this.space -= 2, c) }, u.Builder.prototype.writeInt32 = function(c) { this.bb.writeInt32(this.space -= 4, c) }, u.Builder.prototype.writeInt64 = function(c) { this.bb.writeInt64(this.space -= 8, c) }, u.Builder.prototype.writeFloat32 = function(c) { this.bb.writeFloat32(this.space -= 4, c) }, u.Builder.prototype.writeFloat64 = function(c) { this.bb.writeFloat64(this.space -= 8, c) }, u.Builder.prototype.addInt8 = function(c) { this.prep(1, 0), this.writeInt8(c) }, u.Builder.prototype.addInt16 = function(c) { this.prep(2, 0), this.writeInt16(c) }, u.Builder.prototype.addInt32 = function(c) { this.prep(4, 0), this.writeInt32(c) }, u.Builder.prototype.addInt64 = function(c) { this.prep(8, 0), this.writeInt64(c) }, u.Builder.prototype.addFloat32 = function(c) { this.prep(4, 0), this.writeFloat32(c) }, u.Builder.prototype.addFloat64 = function(c) { this.prep(8, 0), this.writeFloat64(c) }, u.Builder.prototype.addFieldInt8 = function(c, f, s) { (this.force_defaults || f != s) && (this.addInt8(f), this.slot(c)) }, u.Builder.prototype.addFieldInt16 = function(c, f, s) { (this.force_defaults || f != s) && (this.addInt16(f), this.slot(c)) }, u.Builder.prototype.addFieldInt32 = function(c, f, s) { (this.force_defaults || f != s) && (this.addInt32(f), this.slot(c)) }, u.Builder.prototype.addFieldInt64 = function(c, f, s) { !this.force_defaults && f.equals(s) || (this.addInt64(f), this.slot(c)) }, u.Builder.prototype.addFieldFloat32 = function(c, f, s) { (this.force_defaults || f != s) && (this.addFloat32(f), this.slot(c)) }, u.Builder.prototype.addFieldFloat64 = function(c, f, s) { (this.force_defaults || f != s) && (this.addFloat64(f), this.slot(c)) }, u.Builder.prototype.addFieldOffset = function(c, f, s) { (this.force_defaults || f != s) && (this.addOffset(f), this.slot(c)) }, u.Builder.prototype.addFieldStruct = function(c, f, s) { f != s && (this.nested(f), this.slot(c)) 
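/* Editor's note (sketch): the Builder.addField* methods defined above only
   serialize a field when its value differs from the schema default (unless
   force_defaults is set); otherwise the vtable slot stays 0 and readers fall
   back to the default. The shared pattern is roughly:

     function addScalarField(builder, slotIndex, value, defaultValue, writeAdd) {
       if (builder.force_defaults || value != defaultValue) {
         writeAdd.call(builder, value);   // e.g. Builder.prototype.addInt16
         builder.slot(slotIndex);         // record this field's offset in the vtable
       }
     }
*/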
}, u.Builder.prototype.nested = function(c) { if (c != this.offset()) throw new Error("FlatBuffers: struct must be serialized inline.") }, u.Builder.prototype.notNested = function() { if (this.isNested) throw new Error("FlatBuffers: object serialization must not be nested.") }, u.Builder.prototype.slot = function(c) { this.vtable[c] = this.offset() }, u.Builder.prototype.offset = function() { return this.bb.capacity() - this.space }, u.Builder.growByteBuffer = function(c) { var f = c.capacity(); if (3221225472 & f) throw new Error("FlatBuffers: cannot grow buffer beyond 2 gigabytes."); var s = f << 1, h = u.ByteBuffer.allocate(s); return h.setPosition(s - f), h.bytes().set(c.bytes(), s - f), h }, u.Builder.prototype.addOffset = function(c) { this.prep(u.SIZEOF_INT, 0), this.writeInt32(this.offset() - c + u.SIZEOF_INT) }, u.Builder.prototype.startObject = function(c) { this.notNested(), this.vtable == null && (this.vtable = []), this.vtable_in_use = c; for (var f = 0; f < c; f++) this.vtable[f] = 0; this.isNested = !0, this.object_start = this.offset() }, u.Builder.prototype.endObject = function() { if (this.vtable == null || !this.isNested) throw new Error("FlatBuffers: endObject called without startObject"); this.addInt32(0); for (var c = this.offset(), f = this.vtable_in_use - 1; f >= 0 && this.vtable[f] == 0; f--); for (var s = f + 1; f >= 0; f--) this.addInt16(this.vtable[f] != 0 ? c - this.vtable[f] : 0); this.addInt16(c - this.object_start); var h = (s + 2) * u.SIZEOF_SHORT; this.addInt16(h); var p = 0, l = this.space; e: for (f = 0; f < this.vtables.length; f++) { var o = this.bb.capacity() - this.vtables[f]; if (h == this.bb.readInt16(o)) { for (var t = u.SIZEOF_SHORT; t < h; t += u.SIZEOF_SHORT) if (this.bb.readInt16(l + t) != this.bb.readInt16(o + t)) continue e; p = this.vtables[f]; break } } return p ? (this.space = this.bb.capacity() - c, this.bb.writeInt32(this.space, p - c)) : (this.vtables.push(this.offset()), this.bb.writeInt32(this.bb.capacity() - c, this.offset() - c)), this.isNested = !1, c }, u.Builder.prototype.finish = function(c, f, s) { var h = s ? u.SIZE_PREFIX_LENGTH : 0; if (f) { var p = f; if (this.prep(this.minalign, u.SIZEOF_INT + u.FILE_IDENTIFIER_LENGTH + h), p.length != u.FILE_IDENTIFIER_LENGTH) throw new Error("FlatBuffers: file identifier must be length " + u.FILE_IDENTIFIER_LENGTH); for (var l = u.FILE_IDENTIFIER_LENGTH - 1; l >= 0; l--) this.writeInt8(p.charCodeAt(l)) } this.prep(this.minalign, u.SIZEOF_INT + h), this.addOffset(c), h && this.addInt32(this.bb.capacity() - this.space), this.bb.setPosition(this.space) }, u.Builder.prototype.finishSizePrefixed = function(c, f) { this.finish(c, f, !0) }, u.Builder.prototype.requiredField = function(c, f) { var s = this.bb.capacity() - c, h = s - this.bb.readInt32(s); if (this.bb.readInt16(h + f) == 0) throw new Error("FlatBuffers: field " + f + " must be set") }, u.Builder.prototype.startVector = function(c, f, s) { this.notNested(), this.vector_num_elems = f, this.prep(u.SIZEOF_INT, c * f), this.prep(s, c * f) }, u.Builder.prototype.endVector = function() { return this.writeInt32(this.vector_num_elems), this.offset() }, u.Builder.prototype.createString = function(c) { if (c instanceof Uint8Array) var f = c; else { f = []; for (var s = 0; s < c.length;) { var h, p = c.charCodeAt(s++); (h = p < 55296 || p >= 56320 ? p : (p << 10) + c.charCodeAt(s++) + -56613888) < 128 ? f.push(h) : (h < 2048 ? f.push(h >> 6 & 31 | 192) : (h < 65536 ? 
f.push(h >> 12 & 15 | 224) : f.push(h >> 18 & 7 | 240, h >> 12 & 63 | 128), f.push(h >> 6 & 63 | 128)), f.push(63 & h | 128)) } } this.addInt8(0), this.startVector(1, f.length, 1), this.bb.setPosition(this.space -= f.length), s = 0; for (var l = this.space, o = this.bb.bytes(); s < f.length; s++) o[l++] = f[s]; return this.endVector() }, u.Builder.prototype.createLong = function(c, f) { return u.Long.create(c, f) }, u.ByteBuffer = function(c) { this.bytes_ = c, this.position_ = 0 }, u.ByteBuffer.allocate = function(c) { return new u.ByteBuffer(new Uint8Array(c)) }, u.ByteBuffer.prototype.clear = function() { this.position_ = 0 }, u.ByteBuffer.prototype.bytes = function() { return this.bytes_ }, u.ByteBuffer.prototype.position = function() { return this.position_ }, u.ByteBuffer.prototype.setPosition = function(c) { this.position_ = c }, u.ByteBuffer.prototype.capacity = function() { return this.bytes_.length }, u.ByteBuffer.prototype.readInt8 = function(c) { return this.readUint8(c) << 24 >> 24 }, u.ByteBuffer.prototype.readUint8 = function(c) { return this.bytes_[c] }, u.ByteBuffer.prototype.readInt16 = function(c) { return this.readUint16(c) << 16 >> 16 }, u.ByteBuffer.prototype.readUint16 = function(c) { return this.bytes_[c] | this.bytes_[c + 1] << 8 }, u.ByteBuffer.prototype.readInt32 = function(c) { return this.bytes_[c] | this.bytes_[c + 1] << 8 | this.bytes_[c + 2] << 16 | this.bytes_[c + 3] << 24 }, u.ByteBuffer.prototype.readUint32 = function(c) { return this.readInt32(c) >>> 0 }, u.ByteBuffer.prototype.readInt64 = function(c) { return new u.Long(this.readInt32(c), this.readInt32(c + 4)) }, u.ByteBuffer.prototype.readUint64 = function(c) { return new u.Long(this.readUint32(c), this.readUint32(c + 4)) }, u.ByteBuffer.prototype.readFloat32 = function(c) { return u.int32[0] = this.readInt32(c), u.float32[0] }, u.ByteBuffer.prototype.readFloat64 = function(c) { return u.int32[u.isLittleEndian ? 0 : 1] = this.readInt32(c), u.int32[u.isLittleEndian ? 1 : 0] = this.readInt32(c + 4), u.float64[0] }, u.ByteBuffer.prototype.writeInt8 = function(c, f) { this.bytes_[c] = f }, u.ByteBuffer.prototype.writeUint8 = function(c, f) { this.bytes_[c] = f }, u.ByteBuffer.prototype.writeInt16 = function(c, f) { this.bytes_[c] = f, this.bytes_[c + 1] = f >> 8 }, u.ByteBuffer.prototype.writeUint16 = function(c, f) { this.bytes_[c] = f, this.bytes_[c + 1] = f >> 8 }, u.ByteBuffer.prototype.writeInt32 = function(c, f) { this.bytes_[c] = f, this.bytes_[c + 1] = f >> 8, this.bytes_[c + 2] = f >> 16, this.bytes_[c + 3] = f >> 24 }, u.ByteBuffer.prototype.writeUint32 = function(c, f) { this.bytes_[c] = f, this.bytes_[c + 1] = f >> 8, this.bytes_[c + 2] = f >> 16, this.bytes_[c + 3] = f >> 24 }, u.ByteBuffer.prototype.writeInt64 = function(c, f) { this.writeInt32(c, f.low), this.writeInt32(c + 4, f.high) }, u.ByteBuffer.prototype.writeUint64 = function(c, f) { this.writeUint32(c, f.low), this.writeUint32(c + 4, f.high) }, u.ByteBuffer.prototype.writeFloat32 = function(c, f) { u.float32[0] = f, this.writeInt32(c, u.int32[0]) }, u.ByteBuffer.prototype.writeFloat64 = function(c, f) { u.float64[0] = f, this.writeInt32(c, u.int32[u.isLittleEndian ? 0 : 1]), this.writeInt32(c + 4, u.int32[u.isLittleEndian ? 
1 : 0]) }, u.ByteBuffer.prototype.getBufferIdentifier = function() { if (this.bytes_.length < this.position_ + u.SIZEOF_INT + u.FILE_IDENTIFIER_LENGTH) throw new Error("FlatBuffers: ByteBuffer is too short to contain an identifier."); for (var c = "", f = 0; f < u.FILE_IDENTIFIER_LENGTH; f++) c += String.fromCharCode(this.readInt8(this.position_ + u.SIZEOF_INT + f)); return c }, u.ByteBuffer.prototype.__offset = function(c, f) { var s = c - this.readInt32(c); return f < this.readInt16(s) ? this.readInt16(s + f) : 0 }, u.ByteBuffer.prototype.__union = function(c, f) { return c.bb_pos = f + this.readInt32(f), c.bb = this, c }, u.ByteBuffer.prototype.__string = function(c, f) { c += this.readInt32(c); var s = this.readInt32(c), h = "", p = 0; if (c += u.SIZEOF_INT, f === u.Encoding.UTF8_BYTES) return this.bytes_.subarray(c, c + s); for (; p < s;) { var l, o = this.readUint8(c + p++); if (o < 192) l = o; else { var t = this.readUint8(c + p++); if (o < 224) l = (31 & o) << 6 | 63 & t; else { var e = this.readUint8(c + p++); l = o < 240 ? (15 & o) << 12 | (63 & t) << 6 | 63 & e : (7 & o) << 18 | (63 & t) << 12 | (63 & e) << 6 | 63 & this.readUint8(c + p++) } } l < 65536 ? h += String.fromCharCode(l) : (l -= 65536, h += String.fromCharCode(55296 + (l >> 10), 56320 + (1023 & l))) } return h }, u.ByteBuffer.prototype.__indirect = function(c) { return c + this.readInt32(c) }, u.ByteBuffer.prototype.__vector = function(c) { return c + this.readInt32(c) + u.SIZEOF_INT }, u.ByteBuffer.prototype.__vector_len = function(c) { return this.readInt32(c + this.readInt32(c)) }, u.ByteBuffer.prototype.__has_identifier = function(c) { if (c.length != u.FILE_IDENTIFIER_LENGTH) throw new Error("FlatBuffers: file identifier must be length " + u.FILE_IDENTIFIER_LENGTH); for (var f = 0; f < u.FILE_IDENTIFIER_LENGTH; f++) if (c.charCodeAt(f) != this.readInt8(this.position_ + u.SIZEOF_INT + f)) return !1; return !0 }, u.ByteBuffer.prototype.createLong = function(c, f) { return u.Long.create(c, f) } } }, __webpack_module_cache__ = {}; function __webpack_require__(b) { var n = __webpack_module_cache__[b]; if (n !== void 0) return n.exports; var a = __webpack_module_cache__[b] = { exports: {} }; return __webpack_modules__[b].call(a.exports, a, a.exports, __webpack_require__), a.exports } __webpack_require__.n = b => { var n = b && b.__esModule ? () => b.default : () => b; return __webpack_require__.d(n, { a: n }), n }, __webpack_require__.d = (b, n) => { for (var a in n) __webpack_require__.o(n, a) && !__webpack_require__.o(b, a) && Object.defineProperty(b, a, { enumerable: !0, get: n[a] }) }, __webpack_require__.g = function() { if (typeof globalThis == "object") return globalThis; try { return this || new Function("return this")() } catch { if (typeof window == "object") return window } }(), __webpack_require__.o = (b, n) => Object.prototype.hasOwnProperty.call(b, n), __webpack_require__.r = b => { typeof Symbol < "u" && Symbol.toStringTag && Object.defineProperty(b, Symbol.toStringTag, { value: "Module" }), Object.defineProperty(b, "__esModule", { value: !0 }) }; var __webpack_exports__ = __webpack_require__(6018); return __webpack_exports__ })()) })(ortWeb_min$1); var ortWeb_minExports = ortWeb_min$1.exports; const ortWeb_min = getDefaultExportFromCjs(ortWeb_minExports), ONNX_WEB = _mergeNamespaces({ __proto__: null, default: ortWeb_min }, [ortWeb_minExports]); let ONNX; const executionProviders = ["wasm"]; typeof process < "u" && process?.release?.name === "node" ? (ONNX = sharp ?? 
ONNX_NODE, executionProviders.unshift("cpu")) : (ONNX = ortWeb_min ?? ONNX_WEB, typeof navigator < "u" && /iP(hone|od|ad).+16_4.+AppleWebKit/.test(navigator.userAgent) && (ONNX.env.wasm.simd = !1)); const { env: onnx_env } = ONNX, VERSION = "2.15.0", WEB_CACHE_AVAILABLE = typeof self < "u" && "caches" in self, FS_AVAILABLE = !isEmpty(sharp), PATH_AVAILABLE = !isEmpty(sharp), RUNNING_LOCALLY = FS_AVAILABLE && PATH_AVAILABLE, __dirname = RUNNING_LOCALLY ? sharp.dirname(sharp.dirname(sharp.fileURLToPath(import.meta.url))) : "./", DEFAULT_CACHE_DIR = RUNNING_LOCALLY ? sharp.join(__dirname, "/.cache/") : null, DEFAULT_LOCAL_MODEL_PATH = "/models/", localModelPath = RUNNING_LOCALLY ? sharp.join(__dirname, DEFAULT_LOCAL_MODEL_PATH) : DEFAULT_LOCAL_MODEL_PATH; onnx_env.wasm.wasmPaths = RUNNING_LOCALLY ? sharp.join(__dirname, "/dist/") : `https://cdn.jsdelivr.net/npm/@xenova/transformers@${VERSION}/dist/`; const env$1 = { backends: { onnx: onnx_env, tfjs: {} }, __dirname, version: VERSION, allowRemoteModels: !0, remoteHost: "https://huggingface.co/", remotePathTemplate: "{model}/resolve/{revision}/", allowLocalModels: !0, localModelPath, useFS: FS_AVAILABLE, useBrowserCache: WEB_CACHE_AVAILABLE, useFSCache: FS_AVAILABLE, cacheDir: DEFAULT_CACHE_DIR, useCustomCache: !1, customCache: null }; function isEmpty(b) { return Object.keys(b).length === 0 } var define_process_env_default = {}; globalThis.ReadableStream || (globalThis.ReadableStream = sharp.ReadableStream); class FileResponse { _CONTENT_TYPE_MAP = { txt: "text/plain", html: "text/html", css: "text/css", js: "text/javascript", json: "application/json", png: "image/png", jpg: "image/jpeg", jpeg: "image/jpeg", gif: "image/gif" }; constructor(n) { if (this.filePath = n, this.headers = new Headers, this.exists = sharp.existsSync(n), this.exists) { this.status = 200, this.statusText = "OK"; let a = sharp.statSync(n); this.headers.set("content-length", a.size.toString()), this.updateContentType(); let u = this; this.body = new ReadableStream({ start(c) { u.arrayBuffer().then(f => { c.enqueue(new Uint8Array(f)), c.close() }) } }) } else this.status = 404, this.statusText = "Not Found", this.body = null } updateContentType() { const n = this.filePath.toString().split(".").pop().toLowerCase(); this.headers.set("content-type", this._CONTENT_TYPE_MAP[n] ?? "application/octet-stream") } clone() { let n = new FileResponse(this.filePath); return n.exists = this.exists, n.status = this.status, n.statusText = this.statusText, n.headers = new Headers(this.headers), n } async arrayBuffer() { return (await sharp.promises.readFile(this.filePath)).buffer } async blob() { const n = await sharp.promises.readFile(this.filePath); return new Blob([n], { type: this.headers.get("content-type") }) } async text() { return await sharp.promises.readFile(this.filePath, "utf8") } async json() { return JSON.parse(await this.text()) } } function isValidHttpUrl(b, n = null) { let a; try { a = new URL(b) } catch { return !1 } return n && !n.includes(a.hostname) ? !1 : a.protocol === "http:" || a.protocol === "https:" } async function getFile(b) { if (env$1.useFS && !isValidHttpUrl(b)) return new FileResponse(b); if (typeof process < "u" && process?.release?.name === "node") { const n = !!define_process_env_default?.TESTING_REMOTELY, a = env$1.version, u = new Headers; if (u.set("User-Agent", `transformers.js/${a}; is_ci/${n};`), isValidHttpUrl(b, ["huggingface.co", "hf.co"])) { const f = define_process_env_default?.HF_TOKEN ?? 
define_process_env_default?.HF_ACCESS_TOKEN; f && u.set("Authorization", `Bearer ${f}`) } return fetch(b, { headers: u }) } else return fetch(b) } const ERROR_MAPPING = { 400: "Bad request error occurred while trying to load file", 401: "Unauthorized access to file", 403: "Forbidden access to file", 404: "Could not locate file", 408: "Request timeout error occurred while trying to load file", 500: "Internal server error error occurred while trying to load file", 502: "Bad gateway error occurred while trying to load file", 503: "Service unavailable error occurred while trying to load file", 504: "Gateway timeout error occurred while trying to load file" }; function handleError(b, n, a) { if (!a) return null; const u = ERROR_MAPPING[b] ?? `Error (${b}) occurred while trying to load file`; throw Error(`${u}: "${n}".`) } class FileCache { constructor(n) { this.path = n } async match(n) { let a = sharp.join(this.path, n), u = new FileResponse(a); if (u.exists) return u } async put(n, a) { const u = Buffer.from(await a.arrayBuffer()); let c = sharp.join(this.path, n); try { await sharp.promises.mkdir(sharp.dirname(c), { recursive: !0 }), await sharp.promises.writeFile(c, u) } catch (f) { console.warn("An error occurred while writing the file to cache:", f) } } } async function tryCache(b, ...n) { for (let a of n) try { let u = await b.match(a); if (u) return u } catch { continue } } async function getModelFile(b, n, a = !0, u = {}) { if (!env$1.allowLocalModels && u.local_files_only) throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`)."); dispatchCallback(u.progress_callback, { status: "initiate", name: b, file: n }); let c; if (!c && env$1.useBrowserCache) { if (typeof caches > "u") throw Error("Browser cache is not available in this environment."); try { c = await caches.open("transformers-cache") } catch (m) { console.warn("An error occurred while opening the browser cache:", m) } } if (!c && env$1.useFSCache && (c = new FileCache(u.cache_dir ?? env$1.cacheDir)), !c && env$1.useCustomCache) throw Error("`env.useCustomCache=true`, but `env.customCache` is not defined."); const f = u.revision ?? "main"; let s = pathJoin(b, n), h = pathJoin(env$1.localModelPath, s), p = pathJoin(env$1.remoteHost, env$1.remotePathTemplate.replaceAll("{model}", b).replaceAll("{revision}", encodeURIComponent(f)), n), l = f === "main" ? s : pathJoin(b, f, n), o, t = c instanceof FileCache ? l : p, e = !1, r; c && (r = await tryCache(c, h, t)); const i = r !== void 0; if (r === void 0) { if (env$1.allowLocalModels) if (isValidHttpUrl(s)) { if (u.local_files_only) throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${s}.`) } else try { r = await getFile(h), o = h } catch (_) { console.warn(`Unable to load from local path "${h}": "${_}"`) } if (r === void 0 || r.status === 404) { if (u.local_files_only || !env$1.allowRemoteModels) { if (a) throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${h}".`); return null } if (r = await getFile(p), r.status !== 200) return handleError(r.status, p, a); o = t } e = c && typeof Response < "u" && r instanceof Response && r.status === 200 } dispatchCallback(u.progress_callback, { status: "download", name: b, file: n }); const d = { status: "progress", name: b, file: n }; let g; return u.progress_callback ? 
i && typeof navigator < "u" && /firefox/i.test(navigator.userAgent) ? (g = new Uint8Array(await r.arrayBuffer()), dispatchCallback(u.progress_callback, { ...d, progress: 100, loaded: g.length, total: g.length })) : g = await readResponse(r, m => { dispatchCallback(u.progress_callback, { ...d, ...m }) }) : g = new Uint8Array(await r.arrayBuffer()), e && o && await c.match(o) === void 0 && await c.put(o, new Response(g, { headers: r.headers })).catch(m => { console.warn(`Unable to add response to browser cache: ${m}.`) }), dispatchCallback(u.progress_callback, { status: "done", name: b, file: n }), g } async function getModelJSON(b, n, a = !0, u = {}) { let c = await getModelFile(b, n, a, u); if (c === null) return {}; let s = new TextDecoder("utf-8").decode(c); return JSON.parse(s) } async function readResponse(b, n) { const a = b.headers.get("Content-Length"); a === null && console.warn("Unable to determine content-length from response headers. Will expand buffer when needed."); let u = parseInt(a ?? "0"), c = new Uint8Array(u), f = 0; const s = b.body.getReader(); async function h() { const { done: p, value: l } = await s.read(); if (p) return; let o = f + l.length; if (o > u) { u = o; let e = new Uint8Array(u); e.set(c), c = e } c.set(l, f), f = o; const t = f / u * 100; return n({ progress: t, loaded: f, total: u }), h() } return await h(), c } function pathJoin(...b) { return b = b.map((n, a) => (a && (n = n.replace(new RegExp("^/"), "")), a !== b.length - 1 && (n = n.replace(new RegExp("/$"), "")), n)), b.join("/") } function interpolate_data(b, [n, a, u], [c, f], s = "bilinear", h = !1) { const p = f / u, l = c / a, o = new b.constructor(c * f * n), t = a * u, e = c * f; for (let r = 0; r < c; ++r) for (let i = 0; i < f; ++i) { const d = r * f + i, g = (i + .5) / p - .5, m = (r + .5) / l - .5; let _ = Math.floor(g), y = Math.floor(m); const T = Math.min(_ + 1, u - 1), w = Math.min(y + 1, a - 1); _ = Math.max(_, 0), y = Math.max(y, 0); const S = g - _, O = m - y, E = (1 - S) * (1 - O), v = S * (1 - O), P = (1 - S) * O, L = S * O, V = y * u, R = w * u, k = V + _, Y = V + T, C = R + _, $ = R + T; for (let X = 0; X < n; ++X) { const z = X * t; o[X * e + d] = E * b[z + k] + v * b[z + Y] + P * b[z + C] + L * b[z + $] } } return o } function transpose_data(b, n, a) { const u = new Array(a.length), c = new Array(a.length); for (let h = a.length - 1, p = 1; h >= 0; --h) c[h] = p, u[h] = n[a[h]], p *= u[h]; const f = a.map((h, p) => c[a.indexOf(p)]), s = new b.constructor(b.length); for (let h = 0; h < b.length; ++h) { let p = 0; for (let l = n.length - 1, o = h; l >= 0; --l) p += o % n[l] * f[l], o = Math.floor(o / n[l]); s[p] = b[h] } return [s, u] } function softmax(b) { const n = max(b)[0], a = b.map(f => Math.exp(f - n)), u = a.reduce((f, s) => f + s, 0); return a.map(f => f / u) } function log_softmax(b) { return softmax(b).map(u => Math.log(u)) } function getTopItems(b, n = 0) { return b = Array.from(b).map((a, u) => [u, a]).sort((a, u) => u[1] - a[1]), n !== null && n > 0 && (b = b.slice(0, n)), b } function min(b) { if (b.length === 0) throw Error("Array must not be empty"); let n = b[0], a = 0; for (let u = 1; u < b.length; ++u) b[u] < n && (n = b[u], a = u); return [n, a] } function max(b) { if (b.length === 0) throw Error("Array must not be empty"); let n = b[0], a = 0; for (let u = 1; u < b.length; ++u) b[u] > n && (n = b[u], a = u); return [Number(n), a] } function isPowerOfTwo(b) { return b > 0 && (b & b - 1) === 0 } class P2FFT { constructor(n) { if (this.size = n | 0, 
this.size <= 1 || !isPowerOfTwo(this.size)) throw new Error("FFT size must be a power of two larger than 1"); this._csize = n << 1, this.table = new Float64Array(this.size * 2); for (let u = 0; u < this.table.length; u += 2) { const c = Math.PI * u / this.size; this.table[u] = Math.cos(c), this.table[u + 1] = -Math.sin(c) } let a = 0; for (let u = 1; this.size > u; u <<= 1) ++a; this._width = a % 2 === 0 ? a - 1 : a, this._bitrev = new Int32Array(1 << this._width); for (let u = 0; u < this._bitrev.length; ++u) { this._bitrev[u] = 0; for (let c = 0; c < this._width; c += 2) { const f = this._width - c - 2; this._bitrev[u] |= (u >>> c & 3) << f } } } createComplexArray() { return new Float64Array(this._csize) } fromComplexArray(n, a) { const u = a || new Array(n.length >>> 1); for (let c = 0; c < n.length; c += 2) u[c >>> 1] = n[c]; return u } toComplexArray(n, a) { const u = a || this.createComplexArray(); for (let c = 0; c < u.length; c += 2) u[c] = n[c >>> 1], u[c + 1] = 0; return u } completeSpectrum(n) { const a = this._csize, u = a >>> 1; for (let c = 2; c < u; c += 2) n[a - c] = n[c], n[a - c + 1] = -n[c + 1] } transform(n, a) { if (n === a) throw new Error("Input and output buffers must be different"); this._transform4(n, a, 1) } realTransform(n, a) { if (n === a) throw new Error("Input and output buffers must be different"); this._realTransform4(n, a, 1) } inverseTransform(n, a) { if (n === a) throw new Error("Input and output buffers must be different"); this._transform4(n, a, -1); for (let u = 0; u < n.length; ++u) n[u] /= this.size } _transform4(n, a, u) { const c = this._csize; let s = 1 << this._width, h = c / s << 1, p, l; const o = this._bitrev; if (h === 4) for (p = 0, l = 0; p < c; p += h, ++l) { const t = o[l]; this._singleTransform2(a, n, p, t, s) } else for (p = 0, l = 0; p < c; p += h, ++l) { const t = o[l]; this._singleTransform4(a, n, p, t, s, u) } for (s >>= 2; s >= 2; s >>= 2) { h = c / s << 1; const t = h >>> 2; for (p = 0; p < c; p += h) { const e = p + t - 1; for (let r = p, i = 0; r < e; r += 2, i += s) { const d = r, g = d + t, m = g + t, _ = m + t, y = n[d], T = n[d + 1], w = n[g], S = n[g + 1], O = n[m], E = n[m + 1], v = n[_], P = n[_ + 1], L = this.table[i], V = u * this.table[i + 1], R = w * L - S * V, k = w * V + S * L, Y = this.table[2 * i], C = u * this.table[2 * i + 1], $ = O * Y - E * C, X = O * C + E * Y, z = this.table[3 * i], Z = u * this.table[3 * i + 1], J = v * z - P * Z, ue = v * Z + P * z, Se = y + $, Te = T + X, se = y - $, ye = T - X, be = R + J, Ie = k + ue, Le = u * (R - J), ve = u * (k - ue); n[d] = Se + be, n[d + 1] = Te + Ie, n[g] = se + ve, n[g + 1] = ye - Le, n[m] = Se - be, n[m + 1] = Te - Ie, n[_] = se - ve, n[_ + 1] = ye + Le } } } } _singleTransform2(n, a, u, c, f) { const s = n[c], h = n[c + 1], p = n[c + f], l = n[c + f + 1]; a[u] = s + p, a[u + 1] = h + l, a[u + 2] = s - p, a[u + 3] = h - l } _singleTransform4(n, a, u, c, f, s) { const h = f * 2, p = f * 3, l = n[c], o = n[c + 1], t = n[c + f], e = n[c + f + 1], r = n[c + h], i = n[c + h + 1], d = n[c + p], g = n[c + p + 1], m = l + r, _ = o + i, y = l - r, T = o - i, w = t + d, S = e + g, O = s * (t - d), E = s * (e - g); a[u] = m + w, a[u + 1] = _ + S, a[u + 2] = y + E, a[u + 3] = T - O, a[u + 4] = m - w, a[u + 5] = _ - S, a[u + 6] = y - E, a[u + 7] = T + O } _realTransform4(n, a, u) { const c = this._csize; let s = 1 << this._width, h = c / s << 1, p, l; const o = this._bitrev; if (h === 4) for (p = 0, l = 0; p < c; p += h, ++l) { const t = o[l]; 
this._singleRealTransform2(a, n, p, t >>> 1, s >>> 1) } else for (p = 0, l = 0; p < c; p += h, ++l) { const t = o[l]; this._singleRealTransform4(a, n, p, t >>> 1, s >>> 1, u) } for (s >>= 2; s >= 2; s >>= 2) { h = c / s << 1; const t = h >>> 2; for (p = 0; p < c; p += h) { const e = p + t - 1; for (let r = p, i = 0; r < e; r += 2, i += s) { const d = r, g = d + t, m = g + t, _ = m + t, y = n[d], T = n[d + 1], w = n[g], S = n[g + 1], O = n[m], E = n[m + 1], v = n[_], P = n[_ + 1], L = this.table[i], V = u * this.table[i + 1], R = w * L - S * V, k = w * V + S * L, Y = this.table[2 * i], C = u * this.table[2 * i + 1], $ = O * Y - E * C, X = O * C + E * Y, z = this.table[3 * i], Z = u * this.table[3 * i + 1], J = v * z - P * Z, ue = v * Z + P * z, Se = y + $, Te = T + X, se = y - $, ye = T - X, be = R + J, Ie = k + ue, Le = u * (R - J), ve = u * (k - ue); n[d] = Se + be, n[d + 1] = Te + Ie, n[g] = se + ve, n[g + 1] = ye - Le, n[m] = Se - be, n[m + 1] = Te - Ie, n[_] = se - ve, n[_ + 1] = ye + Le } } } } _singleRealTransform2(n, a, u, c, f) { const s = n[c], h = n[c + f]; a[u] = s + h, a[u + 1] = 0, a[u + 2] = s - h, a[u + 3] = 0 } _singleRealTransform4(n, a, u, c, f, s) { const h = f * 2, p = f * 3, l = n[c], o = n[c + f], t = n[c + h], e = n[c + p], r = l + t, i = l - t, d = o + e, g = s * (o - e); a[u] = r + d, a[u + 1] = 0, a[u + 2] = i, a[u + 3] = -g, a[u + 4] = r - d, a[u + 5] = 0, a[u + 6] = i, a[u + 7] = g } } class NP2FFT { constructor(n) { const a = 2 * (n - 1), u = 2 * (2 * n - 1), c = 2 ** Math.ceil(Math.log2(u)); this.bufferSize = c, this._a = a; const f = new Float64Array(u), s = new Float64Array(c); this._chirpBuffer = new Float64Array(c), this._buffer1 = new Float64Array(c), this._buffer2 = new Float64Array(c), this._outBuffer1 = new Float64Array(c), this._outBuffer2 = new Float64Array(c); const h = -2 * Math.PI / n, p = Math.cos(h), l = Math.sin(h); for (let o = 0; o < u >> 1; ++o) { const t = (o + 1 - n) ** 2 / 2, e = Math.sqrt(p ** 2 + l ** 2) ** t, r = t * Math.atan2(l, p), i = 2 * o; f[i] = e * Math.cos(r), f[i + 1] = e * Math.sin(r), s[i] = f[i], s[i + 1] = -f[i + 1] } this._slicedChirpBuffer = f.subarray(a, u), this._f = new P2FFT(c >> 1), this._f.transform(this._chirpBuffer, s) } _transform(n, a, u) { const c = this._buffer1, f = this._buffer2, s = this._outBuffer1, h = this._outBuffer2, p = this._chirpBuffer, l = this._slicedChirpBuffer, o = this._a; if (u) for (let t = 0; t < l.length; t += 2) { const e = t + 1, r = t >> 1, i = a[r]; c[t] = i * l[t], c[e] = i * l[e] } else for (let t = 0; t < l.length; t += 2) { const e = t + 1; c[t] = a[t] * l[t] - a[e] * l[e], c[e] = a[t] * l[e] + a[e] * l[t] } this._f.transform(s, c); for (let t = 0; t < p.length; t += 2) { const e = t + 1; f[t] = s[t] * p[t] - s[e] * p[e], f[e] = s[t] * p[e] + s[e] * p[t] } this._f.inverseTransform(h, f); for (let t = 0; t < h.length; t += 2) { const e = h[t + o], r = h[t + o + 1], i = l[t], d = l[t + 1]; n[t] = e * i - r * d, n[t + 1] = e * d + r * i } } transform(n, a) { this._transform(n, a, !1) } realTransform(n, a) { this._transform(n, a, !0) } } class FFT { constructor(n) { this.fft_length = n, this.isPowerOfTwo = isPowerOfTwo(n), this.isPowerOfTwo ? 
(this.fft = new P2FFT(n), this.outputBufferSize = 2 * n) : (this.fft = new NP2FFT(n), this.outputBufferSize = this.fft.bufferSize) } realTransform(n, a) { this.fft.realTransform(n, a) } transform(n, a) { this.fft.transform(n, a) } } function medianFilter(b, n) { if (n % 2 === 0 || n <= 0) throw new Error("Window size must be a positive odd number"); const a = new b.constructor(b.length), u = new b.constructor(n), c = Math.floor(n / 2); for (let f = 0; f < b.length; ++f) { let s = 0; for (let h = -c; h <= c; ++h) { let p = f + h; p < 0 ? p = Math.abs(p) : p >= b.length && (p = 2 * (b.length - 1) - p), u[s++] = b[p] } u.sort(), a[f] = u[c] } return a } const DataTypeMap = Object.freeze({ float32: Float32Array, float64: Float64Array, string: Array, int8: Int8Array, uint8: Uint8Array, int16: Int16Array, uint16: Uint16Array, int32: Int32Array, uint32: Uint32Array, int64: BigInt64Array, uint64: BigUint64Array, bool: Uint8Array }), ONNXTensor$1 = ONNX.Tensor; class Tensor { dims; type; data; size; constructor(...n) { return n[0] instanceof ONNXTensor$1 ? Object.assign(this, n[0]) : Object.assign(this, new ONNXTensor$1(n[0], n[1], n[2])), new Proxy(this, { get: (a, u) => { if (typeof u == "string") { let c = Number(u); if (Number.isInteger(c)) return a._getitem(c) } return a[u] }, set: (a, u, c) => a[u] = c }) }*[Symbol.iterator]() { const [n, ...a] = this.dims; if (a.length > 0) { const u = a.reduce((c, f) => c * f); for (let c = 0; c < n; ++c) yield this._subarray(c, u, a) } else yield* this.data } _getitem(n) { const [a, ...u] = this.dims; if (n = safeIndex(n, a), u.length > 0) { const c = u.reduce((f, s) => f * s); return this._subarray(n, c, u) } else return new Tensor(this.type, [this.data[n]], u) } indexOf(n) { for (let a = 0; a < this.data.length; ++a) if (this.data[a] == n) return a; return -1 } _subarray(n, a, u) { const c = n * a, f = (n + 1) * a, s = "subarray" in this.data ? 
this.data.subarray(c, f) : this.data.slice(c, f); return new Tensor(this.type, s, u) } item() { if (this.data.length !== 1) throw new Error(`a Tensor with ${this.data.length} elements cannot be converted to Scalar`); return this.data[0] } tolist() { return reshape(this.data, this.dims) } sigmoid() { return this.clone().sigmoid_() } sigmoid_() { for (let n = 0; n < this.data.length; ++n) this.data[n] = 1 / (1 + Math.exp(-this.data[n])); return this } mul(n) { return this.clone().mul_(n) } mul_(n) { for (let a = 0; a < this.data.length; ++a) this.data[a] *= n; return this } add(n) { return this.clone().add_(n) } add_(n) { for (let a = 0; a < this.data.length; ++a) this.data[a] += n; return this } clone() { return new Tensor(this.type, this.data.slice(), this.dims.slice()) } slice(...n) { let a = [], u = []; for (let p = 0; p < this.dims.length; ++p) { let l = n[p]; if (l == null) u.push([0, this.dims[p]]), a.push(this.dims[p]); else if (typeof l == "number") l = safeIndex(l, this.dims[p], p), u.push([l, l + 1]); else if (Array.isArray(l) && l.length === 2) { if (l[0] > l[1]) throw new Error(`Invalid slice: ${l}`); let o = [Math.max(l[0], 0), Math.min(l[1], this.dims[p])]; u.push(o), a.push(o[1] - o[0]) } else throw new Error(`Invalid slice: ${l}`) } let c = u.map(([p, l]) => l - p), f = c.reduce((p, l) => p * l), s = new this.data.constructor(f); const h = this.stride(); for (let p = 0; p < f; ++p) { let l = 0; for (let o = c.length - 1, t = p; o >= 0; --o) { const e = c[o]; l += (t % e + u[o][0]) * h[o], t = Math.floor(t / e) } s[p] = this.data[l] } return new Tensor(this.type, s, a) } transpose(...n) { return transpose(this, n) } sum(n = null, a = !1) { return this.norm(1, n, a) } norm(n = "fro", a = null, u = !1) { if (n === "fro") n = 2; else if (typeof n == "string") throw Error(`Unsupported norm: ${n}`); if (a === null) { let s = this.data.reduce((h, p) => h + p ** n, 0) ** (1 / n); return new Tensor(this.type, [s], []) } a = safeIndex(a, this.dims.length); const c = this.dims.slice(); c[a] = 1; const f = new this.data.constructor(this.data.length / this.dims[a]); for (let s = 0; s < this.data.length; ++s) { let h = 0; for (let p = this.dims.length - 1, l = s, o = 1; p >= 0; --p) { const t = this.dims[p]; if (p !== a) { const e = l % t; h += e * o, o *= c[p] } l = Math.floor(l / t) } f[h] += this.data[s] ** n } if (n !== 1) for (let s = 0; s < f.length; ++s) f[s] = f[s] ** (1 / n); return u || c.splice(a, 1), new Tensor(this.type, f, c) } normalize_(n = 2, a = 1) { a = safeIndex(a, this.dims.length); const u = this.norm(n, a, !0); for (let c = 0; c < this.data.length; ++c) { let f = 0; for (let s = this.dims.length - 1, h = c, p = 1; s >= 0; --s) { const l = this.dims[s]; if (s !== a) { const o = h % l; f += o * p, p *= this.dims[s] } h = Math.floor(h / l) } this.data[c] /= u.data[f] } return this } normalize(n = 2, a = 1) { return this.clone().normalize_(n, a) } stride() { return dimsToStride(this.dims) } squeeze(n = null) { return new Tensor(this.type, this.data, calc_squeeze_dims(this.dims, n)) } squeeze_(n = null) { return this.dims = calc_squeeze_dims(this.dims, n), this } unsqueeze(n = null) { return new Tensor(this.type, this.data, calc_unsqueeze_dims(this.dims, n)) } unsqueeze_(n = null) { return this.dims = calc_unsqueeze_dims(this.dims, n), this } flatten_(n = 0, a = -1) { a = (a + this.dims.length) % this.dims.length; let u = this.dims.slice(0, n), c = this.dims.slice(n, a + 1), f = this.dims.slice(a + 1); return this.dims = [...u, c.reduce((s, h) => s * h, 1), ...f], this } 
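// Note: Tensor methods with a trailing underscore (e.g. flatten_, clamp_, round_, normalize_) mutate the tensor in place; the corresponding non-underscored methods clone first and then delegate to the in-place variant.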
flatten(n = 0, a = -1) { return this.clone().flatten_(n, a) } view(...n) { let a = -1; for (let u = 0; u < n.length; ++u) if (n[u] === -1) { if (a !== -1) throw new Error("Only one dimension can be inferred"); a = u } if (a !== -1) { const u = n.reduce((c, f, s) => s !== a ? c * f : c, 1); n[a] = this.data.length / u } return new Tensor(this.type, this.data, n) } neg_() { for (let n = 0; n < this.data.length; ++n) this.data[n] = -this.data[n]; return this } neg() { return this.clone().neg_() } clamp_(n, a) { for (let u = 0; u < this.data.length; ++u) this.data[u] = Math.min(Math.max(this.data[u], n), a); return this } clamp(n, a) { return this.clone().clamp_(n, a) } round_() { for (let n = 0; n < this.data.length; ++n) this.data[n] = Math.round(this.data[n]); return this } round() { return this.clone().round_() } to(n) { if (this.type === n) return this; if (!DataTypeMap.hasOwnProperty(n)) throw new Error(`Unsupported type: ${n}`); return new Tensor(n, DataTypeMap[n].from(this.data), this.dims) } } function reshape(b, n) { const a = b.length, u = n.reduce((f, s) => f * s); if (a !== u) throw Error(`cannot reshape array of size ${a} into shape (${n})`); let c = b; for (let f = n.length - 1; f >= 0; f--) c = c.reduce((s, h) => { let p = s[s.length - 1]; return p.length < n[f] ? p.push(h) : s.push([h]), s }, [ [] ]); return c[0] } function transpose(b, n) { const [a, u] = transpose_data(b.data, b.dims, n); return new Tensor(b.type, a, u) } function interpolate(b, [n, a], u = "bilinear", c = !1) { const f = b.dims.at(-3) ?? 1, s = b.dims.at(-2), h = b.dims.at(-1); let p = interpolate_data(b.data, [f, s, h], [n, a], u, c); return new Tensor(b.type, p, [f, n, a]) } function calc_squeeze_dims(b, n) { return b = b.slice(), n === null ? b = b.filter(a => a !== 1) : typeof n == "number" ? 
b[n] === 1 && b.splice(n, 1) : Array.isArray(n) && (b = b.filter((a, u) => a !== 1 || !n.includes(u))), b } function calc_unsqueeze_dims(b, n) { return n = safeIndex(n, b.length + 1), b = b.slice(), b.splice(n, 0, 1), b } function safeIndex(b, n, a = null) { if (b < -n || b >= n) throw new Error(`IndexError: index ${b} is out of bounds for dimension${a===null?"":" "+a} with size ${n}`); return b < 0 && (b = (b % n + n) % n), b } function cat(b, n = 0) { n = safeIndex(n, b[0].dims.length); const a = b[0].dims.slice(); a[n] = b.reduce((s, h) => s + h.dims[n], 0); const u = a.reduce((s, h) => s * h, 1), c = new b[0].data.constructor(u), f = b[0].type; if (n === 0) { let s = 0; for (let h of b) c.set(h.data, s), s += h.data.length } else { let s = 0; for (let h = 0; h < b.length; ++h) { let p = b[h]; for (let l = 0; l < p.data.length; ++l) { let o = 0; for (let t = p.dims.length - 1, e = l, r = 1; t >= 0; --t) { const i = p.dims[t]; let d = e % i; t === n && (d += s), o += d * r, r *= a[t], e = Math.floor(e / i) } c[o] = p.data[l] } s += p.dims[n] } } return new Tensor(f, c, a) } function stack(b, n = 0) { return cat(b.map(a => a.unsqueeze(n)), n) } function std_mean(b, n = null, a = 1, u = !1) { if (n === null) { const l = b.data.reduce((r, i) => r + i, 0) / b.data.length, o = Math.sqrt(b.data.reduce((r, i) => r + (i - l) ** 2, 0) / (b.data.length - a)), t = new Tensor(b.type, [l], []); return [new Tensor(b.type, [o], []), t] } n = safeIndex(n, b.dims.length); const c = mean(b, n, u), f = b.dims.slice(); f[n] = 1; const s = new b.data.constructor(b.data.length / b.dims[n]); for (let p = 0; p < b.data.length; ++p) { let l = 0; for (let o = b.dims.length - 1, t = p, e = 1; o >= 0; --o) { const r = b.dims[o]; if (o !== n) { const i = t % r; l += i * e, e *= f[o] } t = Math.floor(t / r) } s[l] += (b.data[p] - c.data[l]) ** 2 } for (let p = 0; p < s.length; ++p) s[p] = Math.sqrt(s[p] / (b.dims[n] - a)); return u || f.splice(n, 1), [new Tensor(b.type, s, f), c] } function mean(b, n = null, a = !1) { if (n === null) { let f = b.data.reduce((s, h) => s + h, 0); return new Tensor(b.type, [f / b.data.length], []) } n = safeIndex(n, b.dims.length); const u = b.dims.slice(); u[n] = 1; const c = new b.data.constructor(b.data.length / b.dims[n]); for (let f = 0; f < b.data.length; ++f) { let s = 0; for (let h = b.dims.length - 1, p = f, l = 1; h >= 0; --h) { const o = b.dims[h]; if (h !== n) { const t = p % o; s += t * l, l *= u[h] } p = Math.floor(p / o) } c[s] += b.data[f] } if (b.dims[n] !== 1) for (let f = 0; f < c.length; ++f) c[f] = c[f] / b.dims[n]; return a || u.splice(n, 1), new Tensor(b.type, c, u) } function dynamicTimeWarping(b) { const [n, a] = b.dims, u = [n + 1, a + 1], c = new Tensor("float32", new Float32Array(u[0] * u[1]).fill(1 / 0), u), f = new Tensor("float32", new Float32Array(u[0] * u[1]).fill(-1), u); c[0].data[0] = 0; for (let o = 1; o < a + 1; ++o) for (let t = 1; t < n + 1; ++t) { const e = c[t - 1][o - 1].item(), r = c[t - 1][o].item(), i = c[t][o - 1].item(); let d, g; e < r && e < i ? (d = e, g = 0) : r < e && r < i ? (d = r, g = 1) : (d = i, g = 2), c[t].data[o] = b[t - 1][o - 1].item() + d, f[t].data[o] = g } let s = n, h = a; f.data.fill(2, 0, u[1]); for (let o = 0; o < u[0]; ++o) f[o].data[0] = 1; let p = [], l = []; for (; s > 0 || h > 0;) switch (p.push(s - 1), l.push(h - 1), f[s][h].item()) { case 0: --s, --h; break; case 1: --s; break; case 2: --h; break; default: throw new Error(`Internal error in dynamic time warping. Unexpected trace[${s}, ${h}]. 
Please file a bug report.`) } return p.reverse(), l.reverse(), [p, l] } function dimsToStride(b) { const n = new Array(b.length); for (let a = b.length - 1, u = 1; a >= 0; --a) n[a] = u, u *= b[a]; return n } function ones(b) { const n = b.reduce((a, u) => a * u, 1); return new Tensor("int64", new BigInt64Array(n).fill(1n), b) } function ones_like(b) { return ones(b.dims) } var TOKEN_TYPES = Object.freeze({ Text: "Text", NumericLiteral: "NumericLiteral", BooleanLiteral: "BooleanLiteral", StringLiteral: "StringLiteral", Identifier: "Identifier", Equals: "Equals", OpenParen: "OpenParen", CloseParen: "CloseParen", OpenStatement: "OpenStatement", CloseStatement: "CloseStatement", OpenExpression: "OpenExpression", CloseExpression: "CloseExpression", OpenSquareBracket: "OpenSquareBracket", CloseSquareBracket: "CloseSquareBracket", Comma: "Comma", Dot: "Dot", Colon: "Colon", Pipe: "Pipe", CallOperator: "CallOperator", AdditiveBinaryOperator: "AdditiveBinaryOperator", MultiplicativeBinaryOperator: "MultiplicativeBinaryOperator", ComparisonBinaryOperator: "ComparisonBinaryOperator", UnaryOperator: "UnaryOperator", Set: "Set", If: "If", For: "For", In: "In", NotIn: "NotIn", Else: "Else", EndIf: "EndIf", ElseIf: "ElseIf", EndFor: "EndFor", And: "And", Or: "Or", Not: "UnaryOperator" }); Object.freeze({ set: TOKEN_TYPES.Set, for: TOKEN_TYPES.For, in: TOKEN_TYPES.In, if: TOKEN_TYPES.If, else: TOKEN_TYPES.Else, endif: TOKEN_TYPES.EndIf, elif: TOKEN_TYPES.ElseIf, endfor: TOKEN_TYPES.EndFor, and: TOKEN_TYPES.And, or: TOKEN_TYPES.Or, not: TOKEN_TYPES.Not, "not in": TOKEN_TYPES.NotIn, true: TOKEN_TYPES.BooleanLiteral, false: TOKEN_TYPES.BooleanLiteral }); TOKEN_TYPES.OpenStatement, TOKEN_TYPES.CloseStatement, TOKEN_TYPES.OpenExpression, TOKEN_TYPES.CloseExpression, TOKEN_TYPES.OpenParen, TOKEN_TYPES.CloseParen, TOKEN_TYPES.OpenSquareBracket, TOKEN_TYPES.CloseSquareBracket, TOKEN_TYPES.Comma, TOKEN_TYPES.Dot, TOKEN_TYPES.Colon, TOKEN_TYPES.Pipe, TOKEN_TYPES.ComparisonBinaryOperator, TOKEN_TYPES.ComparisonBinaryOperator, TOKEN_TYPES.ComparisonBinaryOperator, TOKEN_TYPES.ComparisonBinaryOperator, TOKEN_TYPES.ComparisonBinaryOperator, TOKEN_TYPES.ComparisonBinaryOperator, TOKEN_TYPES.AdditiveBinaryOperator, TOKEN_TYPES.AdditiveBinaryOperator, TOKEN_TYPES.MultiplicativeBinaryOperator, TOKEN_TYPES.MultiplicativeBinaryOperator, TOKEN_TYPES.MultiplicativeBinaryOperator, TOKEN_TYPES.Equals; const BYTES_TO_UNICODE = (() => { const b = [...Array.from({ length: 94 }, (c, f) => f + 33), ...Array.from({ length: 12 }, (c, f) => f + 161), ...Array.from({ length: 82 }, (c, f) => f + 174)], n = b.slice(); let a = 0; for (let c = 0; c < 256; ++c) b.includes(c) || (b.push(c), n.push(256 + a), a += 1); const u = n.map(c => String.fromCharCode(c)); return Object.fromEntries(b.map((c, f) => [c, u[f]])) })(); reverseDictionary(BYTES_TO_UNICODE); const WHISPER_LANGUAGES = [ ["en", "english"], ["zh", "chinese"], ["de", "german"], ["es", "spanish"], ["ru", "russian"], ["ko", "korean"], ["fr", "french"], ["ja", "japanese"], ["pt", "portuguese"], ["tr", "turkish"], ["pl", "polish"], ["ca", "catalan"], ["nl", "dutch"], ["ar", "arabic"], ["sv", "swedish"], ["it", "italian"], ["id", "indonesian"], ["hi", "hindi"], ["fi", "finnish"], ["vi", "vietnamese"], ["he", "hebrew"], ["uk", "ukrainian"], ["el", "greek"], ["ms", "malay"], ["cs", "czech"], ["ro", "romanian"], ["da", "danish"], ["hu", "hungarian"], ["ta", "tamil"], ["no", "norwegian"], ["th", "thai"], ["ur", "urdu"], ["hr", "croatian"], ["bg", "bulgarian"], ["lt", "lithuanian"], ["la", 
"latin"], ["mi", "maori"], ["ml", "malayalam"], ["cy", "welsh"], ["sk", "slovak"], ["te", "telugu"], ["fa", "persian"], ["lv", "latvian"], ["bn", "bengali"], ["sr", "serbian"], ["az", "azerbaijani"], ["sl", "slovenian"], ["kn", "kannada"], ["et", "estonian"], ["mk", "macedonian"], ["br", "breton"], ["eu", "basque"], ["is", "icelandic"], ["hy", "armenian"], ["ne", "nepali"], ["mn", "mongolian"], ["bs", "bosnian"], ["kk", "kazakh"], ["sq", "albanian"], ["sw", "swahili"], ["gl", "galician"], ["mr", "marathi"], ["pa", "punjabi"], ["si", "sinhala"], ["km", "khmer"], ["sn", "shona"], ["yo", "yoruba"], ["so", "somali"], ["af", "afrikaans"], ["oc", "occitan"], ["ka", "georgian"], ["be", "belarusian"], ["tg", "tajik"], ["sd", "sindhi"], ["gu", "gujarati"], ["am", "amharic"], ["yi", "yiddish"], ["lo", "lao"], ["uz", "uzbek"], ["fo", "faroese"], ["ht", "haitian creole"], ["ps", "pashto"], ["tk", "turkmen"], ["nn", "nynorsk"], ["mt", "maltese"], ["sa", "sanskrit"], ["lb", "luxembourgish"], ["my", "myanmar"], ["bo", "tibetan"], ["tl", "tagalog"], ["mg", "malagasy"], ["as", "assamese"], ["tt", "tatar"], ["haw", "hawaiian"], ["ln", "lingala"], ["ha", "hausa"], ["ba", "bashkir"], ["jw", "javanese"], ["su", "sundanese"] ]; new Map(WHISPER_LANGUAGES); async function loadConfig(b, n) { return await getModelJSON(b, "config.json", !0, n) } class PretrainedConfig { constructor(n) { this.model_type = null, this.is_encoder_decoder = !1, Object.assign(this, n) } static async from_pretrained(n, { progress_callback: a = null, config: u = null, cache_dir: c = null, local_files_only: f = !1, revision: s = "main" } = {}) { let h = u ?? await loadConfig(n, { progress_callback: a, config: u, cache_dir: c, local_files_only: f, revision: s }); return new this(h) } } class AutoConfig { static async from_pretrained(...n) { return PretrainedConfig.from_pretrained(...n) } } class LogitsProcessorList extends Callable { constructor() { super(), this.processors = [] } push(n) { this.processors.push(n) } extend(n) { this.processors.push(...n) } _call(n, a) { for (let u of a) this.processors.forEach(c => c(n, u)) } [Symbol.iterator]() { return this.processors.values() } } class LogitsProcessor extends Callable { _call(n, a) { throw Error("`_call` should be implemented in a subclass") } } class ForceTokensLogitsProcessor extends LogitsProcessor { constructor(n) { super(), this.force_token_map = Object.fromEntries(n ?? 
[]) } _call(n, a) { let u = this.force_token_map[n.length]; return exists(u) && (a.data.fill(-1 / 0), a.data[u] = 0), a } } class ForcedBOSTokenLogitsProcessor extends LogitsProcessor { constructor(n) { super(), this.bos_token_id = n } _call(n, a) { return n.length === 1 && (a.data.fill(-1 / 0), a.data[this.bos_token_id] = 0), a } } class ForcedEOSTokenLogitsProcessor extends LogitsProcessor { constructor(n, a) { super(), this.max_length = n, this.forced_eos_token_id = a } _call(n, a) {} } class SuppressTokensAtBeginLogitsProcessor extends LogitsProcessor { constructor(n, a) { super(), this.begin_suppress_tokens = n, this.begin_index = a } _call(n, a) { if (n.length === this.begin_index) for (let u of this.begin_suppress_tokens) a.data[u] = -1 / 0; return a } } class WhisperTimeStampLogitsProcessor extends LogitsProcessor { constructor(n) { super(), this.eos_token_id = n.eos_token_id, this.no_timestamps_token_id = n.no_timestamps_token_id, this.timestamp_begin = this.no_timestamps_token_id + 1, this.begin_index = (n.forced_decoder_ids || []).length + 2, n.forced_decoder_ids.slice(-1)[0][1] === this.no_timestamps_token_id && (this.begin_index -= 1), this.max_initial_timestamp_index = n.max_initial_timestamp_index } _call(n, a) { const u = a.data; if (u[this.no_timestamps_token_id] = -1 / 0, n.length === this.begin_index - 1) return u.fill(-1 / 0), u[this.timestamp_begin] = 0, a; const c = n.slice(this.begin_index), f = c.length >= 1 && c[c.length - 1] >= this.timestamp_begin, s = c.length < 2 || c[c.length - 2] >= this.timestamp_begin; if (f && (s ? u.subarray(this.timestamp_begin).fill(-1 / 0) : u.subarray(0, this.eos_token_id).fill(-1 / 0)), n.length === this.begin_index && this.max_initial_timestamp_index !== null) { const o = this.timestamp_begin + this.max_initial_timestamp_index; u.subarray(o + 1).fill(-1 / 0) } const h = log_softmax(u), p = Math.log(h.subarray(this.timestamp_begin).map(Math.exp).reduce((o, t) => o + t)), l = max(h.subarray(0, this.timestamp_begin))[0]; return p > l && u.subarray(0, this.timestamp_begin).fill(-1 / 0), a } } class NoRepeatNGramLogitsProcessor extends LogitsProcessor { constructor(n) { super(), this.no_repeat_ngram_size = n } getNgrams(n) { const a = n.length, u = []; for (let f = 0; f < a + 1 - this.no_repeat_ngram_size; ++f) { const s = []; for (let h = 0; h < this.no_repeat_ngram_size; ++h) s.push(n[f + h]); u.push(s) } const c = new Map; for (const f of u) { const s = f.slice(0, f.length - 1), h = JSON.stringify(s), p = c.get(h) ?? []; p.push(f[f.length - 1]), c.set(h, p) } return c } getGeneratedNgrams(n, a) { const u = a.slice(a.length + 1 - this.no_repeat_ngram_size, a.length); return n.get(JSON.stringify(u)) ?? [] } calcBannedNgramTokens(n) { const a = []; if (n.length + 1 < this.no_repeat_ngram_size) return a; { const u = this.getNgrams(n); return this.getGeneratedNgrams(u, n) } } _call(n, a) { const u = this.calcBannedNgramTokens(n); for (const c of u) a.data[c] = -1 / 0; return a } } class RepetitionPenaltyLogitsProcessor extends LogitsProcessor { constructor(n) { super(), this.penalty = n } _call(n, a) { for (const u of n) a.data[u] < 0 ? a.data[u] *= this.penalty : a.data[u] /= this.penalty; return a } } class MinLengthLogitsProcessor extends LogitsProcessor { constructor(n, a) { super(), this.min_length = n, this.eos_token_id = Array.isArray(a) ? 
a : [a] } _call(n, a) { if (n.length < this.min_length) for (const u of this.eos_token_id) a.data[u] = -1 / 0; return a } } class MinNewTokensLengthLogitsProcessor extends LogitsProcessor { constructor(n, a, u) { super(), this.prompt_length_to_skip = n, this.min_new_tokens = a, this.eos_token_id = Array.isArray(u) ? u : [u] } _call(n, a) { if (n.length - this.prompt_length_to_skip < this.min_new_tokens) for (const c of this.eos_token_id) a.data[c] = -1 / 0; return a } } class NoBadWordsLogitsProcessor extends LogitsProcessor { constructor(n, a) { super(), this.bad_words_ids = n, this.eos_token_id = Array.isArray(a) ? a : [a] } _call(n, a) { for (const u of this.bad_words_ids) { let c = !0; for (let f = 1; f <= u.length - 1 && u.length < n.length; ++f) if (u.at(-f - 1) !== n.at(-f)) { c = !1; break } c && (a.data[u.at(-1)] = -1 / 0) } return a } } const GenerationConfig = class { constructor(b = {}) { this.max_length = b.max_length ?? 20, this.max_new_tokens = b.max_new_tokens ?? null, this.min_length = b.min_length ?? 0, this.min_new_tokens = b.min_new_tokens ?? null, this.early_stopping = b.early_stopping ?? !1, this.max_time = b.max_time ?? null, this.do_sample = b.do_sample ?? !1, this.num_beams = b.num_beams ?? 1, this.num_beam_groups = b.num_beam_groups ?? 1, this.penalty_alpha = b.penalty_alpha ?? null, this.use_cache = b.use_cache ?? !0, this.temperature = b.temperature ?? 1, this.top_k = b.top_k ?? 50, this.top_p = b.top_p ?? 1, this.typical_p = b.typical_p ?? 1, this.epsilon_cutoff = b.epsilon_cutoff ?? 0, this.eta_cutoff = b.eta_cutoff ?? 0, this.diversity_penalty = b.diversity_penalty ?? 0, this.repetition_penalty = b.repetition_penalty ?? 1, this.encoder_repetition_penalty = b.encoder_repetition_penalty ?? 1, this.length_penalty = b.length_penalty ?? 1, this.no_repeat_ngram_size = b.no_repeat_ngram_size ?? 0, this.bad_words_ids = b.bad_words_ids ?? null, this.force_words_ids = b.force_words_ids ?? null, this.renormalize_logits = b.renormalize_logits ?? !1, this.constraints = b.constraints ?? null, this.forced_bos_token_id = b.forced_bos_token_id ?? null, this.forced_eos_token_id = b.forced_eos_token_id ?? null, this.remove_invalid_values = b.remove_invalid_values ?? !1, this.exponential_decay_length_penalty = b.exponential_decay_length_penalty ?? null, this.suppress_tokens = b.suppress_tokens ?? null, this.begin_suppress_tokens = b.begin_suppress_tokens ?? null, this.forced_decoder_ids = b.forced_decoder_ids ?? null, this.num_return_sequences = b.num_return_sequences ?? 1, this.output_attentions = b.output_attentions ?? !1, this.output_hidden_states = b.output_hidden_states ?? !1, this.output_scores = b.output_scores ?? !1, this.return_dict_in_generate = b.return_dict_in_generate ?? !1, this.pad_token_id = b.pad_token_id ?? null, this.bos_token_id = b.bos_token_id ?? null, this.eos_token_id = b.eos_token_id ?? null, this.encoder_no_repeat_ngram_size = b.encoder_no_repeat_ngram_size ?? 0, this.decoder_start_token_id = b.decoder_start_token_id ?? null, this.generation_kwargs = b.generation_kwargs ?? 
{} } }; class Sampler extends Callable { constructor(n) { super(), this.generation_config = n } _call(n, a = -1) { return this.sample(n, a) } sample(n, a) { throw Error("sample should be implemented in subclasses.") } getLogits(n, a) { let u = n.dims.at(-1), c = n.data; if (a === -1) c = c.slice(-u); else { let f = a * u; c = c.slice(f, f + u) } return this.generation_config.temperature > 0 && (c = c.map(f => f / this.generation_config.temperature)), c } randomSelect(n) { let a = n.reduce((c, f) => c + f, 0), u = Math.random() * a; for (let c = 0; c < n.length; ++c) if (u -= n[c], u <= 0) return c; return 0 } static getSampler(n) { if (n.do_sample) return new MultinomialSampler(n); if (n.num_beams > 1) return new BeamSearchSampler(n); if (n.num_return_sequences > 1) throw Error(`num_return_sequences has to be 1 when doing greedy search, but is ${n.num_return_sequences}.`); return new GreedySampler(n) } } class GreedySampler extends Sampler { sample(n, a = -1) { let u = this.getLogits(n, a); return [ [max(u)[1], 0] ] } } class MultinomialSampler extends Sampler { sample(n, a = -1) { let u = n.dims.at(-1); this.generation_config.top_k > 0 && (u = Math.min(this.generation_config.top_k, u)); const c = this.getLogits(n, a), f = getTopItems(c, u), s = softmax(f.map(h => h[1])); return Array.from({ length: this.generation_config.num_beams }, () => { const h = this.randomSelect(s); return [f[h][0], Math.log(s[h])] }) } } class BeamSearchSampler extends Sampler { sample(n, a = -1) { let u = n.dims.at(-1); this.generation_config.top_k > 0 && (u = Math.min(this.generation_config.top_k, u)); const c = this.getLogits(n, a), f = getTopItems(c, u), s = softmax(f.map(h => h[1])); return Array.from({ length: this.generation_config.num_beams }, (h, p) => [f[p][0], Math.log(s[p])]) } } const { InferenceSession, Tensor: ONNXTensor, env } = ONNX, MODEL_TYPES = { EncoderOnly: 0, EncoderDecoder: 1, Seq2Seq: 2, Vision2Seq: 3, DecoderOnly: 4, MaskGeneration: 5 }, MODEL_TYPE_MAPPING = new Map, MODEL_NAME_TO_CLASS_MAPPING = new Map, MODEL_CLASS_TO_NAME_MAPPING = new Map; async function constructSession(b, n, a) { let u = `onnx/${n}${a.quantized?"_quantized":""}.onnx`, c = await getModelFile(b, u, !0, a); try { return await InferenceSession.create(c, { executionProviders }) } catch (f) { if (executionProviders.length === 1 && executionProviders[0] === "wasm") throw f; return console.warn(f), console.warn("Something went wrong during model construction (most likely a missing operation). Using `wasm` as a fallback. "), await InferenceSession.create(c, { executionProviders: ["wasm"] }) } } function validateInputs(b, n) { const a = Object.create(null), u = []; for (const s of b.inputNames) { const h = n[s]; if (!(h instanceof Tensor)) { u.push(s); continue } a[s] = env.wasm.proxy ? h.clone() : h } if (u.length > 0) throw new Error(`An error occurred during model execution: "Missing the following inputs: ${u.join(", ")}.`); const c = Object.keys(n).length, f = b.inputNames.length; if (c > f) { let s = Object.keys(n).filter(h => !b.inputNames.includes(h)); console.warn(`WARNING: Too many inputs were provided (${c} > ${f}). 
The following inputs will be ignored: "${s.join(", ")}".`) } return a } async function sessionRun(b, n) { const a = validateInputs(b, n); try { let u = await b.run(a); return u = replaceTensors(u), u } catch (u) { throw console.error(`An error occurred during model execution: "${u}".`), console.error("Inputs given to model:", a), u } } function replaceTensors(b) { for (let n in b) b[n] instanceof ONNXTensor ? b[n] = new Tensor(b[n]) : typeof b[n] == "object" && replaceTensors(b[n]); return b } function toI64Tensor(b) { if (b instanceof Tensor) return b; if (b.length === 0) throw Error("items must be non-empty"); if (Array.isArray(b[0])) { if (b.some(n => n.length !== b[0].length)) throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length."); return new Tensor("int64", BigInt64Array.from(b.flat().map(n => BigInt(n))), [b.length, b[0].length]) } else return new Tensor("int64", BigInt64Array.from(b.map(n => BigInt(n))), [1, b.length]) } function prepareAttentionMask(b, n) { let a = b.config.pad_token_id ?? null, u = b.config.eos_token_id ?? null; isIntegralNumber(u) && (u = [u]); let c = n.indexOf(a) !== -1, f = u === null || !u.includes(a); if (c && f) { let s = BigInt64Array.from(n.data.map(h => h != a)); return new Tensor("int64", s, n.dims) } else return ones_like(n) } function preparePositionIds(b, n, a) { if (!b.inputNames.includes("position_ids")) return; const u = new BigInt64Array(n.attention_mask.data.length); for (let c = 0; c < n.attention_mask.dims[0]; ++c) { let f = c * n.attention_mask.dims[1], s = BigInt(0); for (let h = 0; h < n.attention_mask.dims[1]; ++h) { const p = f + h; n.attention_mask.data[p] === 0n ? u[p] = BigInt(1) : (u[p] = s, s += n.attention_mask.data[p]) } } n.position_ids = new Tensor("int64", u, n.attention_mask.dims), a && (n.position_ids = n.position_ids.slice(null, -1).unsqueeze_(-1)) } function boolTensor(b) { return new Tensor("bool", [b], [1]) } async function seq2seqForward(b, n) { let { encoder_outputs: a, past_key_values: u } = n; a || (a = (await encoderForward(b, n)).last_hidden_state); let c = { input_ids: n.decoder_input_ids, encoder_hidden_states: a }; const f = !!u; b.decoder_merged_session.inputNames.includes("use_cache_branch") && (c.use_cache_branch = boolTensor(f)), b.decoder_merged_session.inputNames.includes("encoder_attention_mask") && (c.encoder_attention_mask = n.attention_mask), preparePositionIds(b.decoder_merged_session, c, f), b.addPastKeyValues(c, u); const s = await sessionRun(b.decoder_merged_session, c); let h = s.logits; u = b.getPastKeyValues(s, u); const p = b.getAttentions(s); return new Seq2SeqLMOutput({ logits: h, past_key_values: u, encoder_outputs: a, ...p }) } function seq2seqStartBeams(b, n, a, u) { let c = [], f = 0; const s = b.requires_attention_mask ?? !0; let h = a.decoder_input_ids ?? a.decoder_start_token_id ?? a.bos_token_id ?? a.eos_token_id; h instanceof Tensor ? 
h = h.tolist().flat() : Array.isArray(h) || (h = [h]); for (let p of n) { p.dims = [1, ...p.dims]; let l = { inputs: p, encoder_outputs: null, prev_model_outputs: null, output_token_ids: h, done: !1, score: 0, id: f++ }; s && (l.attention_mask = prepareAttentionMask(b, p)), c.push(l) } return c } async function seq2seqRunBeam(b, n) { const a = b.main_input_name; let u = n.output_token_ids; n.prev_model_outputs && (u = u.slice(-1)); let c = { [a]: n.inputs, decoder_input_ids: toI64Tensor(u), encoder_outputs: n.encoder_outputs, past_key_values: n.prev_model_outputs?.past_key_values }; n.attention_mask && (c.attention_mask = n.attention_mask); let f = await b.forward(c); return n.prev_model_outputs = f, n.encoder_outputs = f.encoder_outputs, f } function seq2seqUpdatebeam(b, n) { b.output_token_ids = [...b.output_token_ids, n] } async function encoderForward(b, n) { const a = Object.create(null); for (const u of b.session.inputNames) a[u] = n[u]; return b.session.inputNames.includes("token_type_ids") && !a.token_type_ids && (a.token_type_ids = new Tensor("int64", new BigInt64Array(a.input_ids.data.length), a.input_ids.dims)), await sessionRun(b.session, a) } async function decoderForward(b, n) { let { input_ids: a, past_key_values: u, attention_mask: c } = n, f = { input_ids: a, attention_mask: c ?? prepareAttentionMask(b, a) }; const s = !!u; b.session.inputNames.includes("use_cache_branch") && (f.use_cache_branch = boolTensor(s)), preparePositionIds(b.session, f, s), b.addPastKeyValues(f, u); let h = await sessionRun(b.session, f), p = h.logits; return u = b.getPastKeyValues(h, u), { logits: p, past_key_values: u } } function decoderStartBeams(b, n, a, u, c) { let f = [], s = 0; for (let h of n) { let p = h.tolist().map(Number); h.dims = [1, ...h.dims]; let l; c ? (l = c[s], l.dims = [1, ...l.dims]) : l = prepareAttentionMask(b, h); let o = { input: h, model_input_ids: h, attention_mask: l, prev_model_outputs: null, output_token_ids: p, num_output_tokens: u, done: !1, score: 0, id: s++ }; f.push(o) } return f } async function decoderRunBeam(b, n) { let a = new BigInt64Array(n.output_token_ids.length).fill(1n), u = { input_ids: n.model_input_ids, attention_mask: new Tensor("int64", a, [1, a.length]), past_key_values: n.prev_model_outputs?.past_key_values }, c = await b.forward(u); return n.prev_model_outputs = c, c } function decoderUpdatebeam(b, n) { b.output_token_ids = [...b.output_token_ids, n], b.model_input_ids = new Tensor("int64", [BigInt(n)], [1, 1]) } class PreTrainedModel extends Callable { main_input_name = "input_ids"; constructor(n, a) { super(), this.config = n, this.session = a; const u = MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor), c = MODEL_TYPE_MAPPING.get(u); this.can_generate = !1, this._runBeam = null, this._getStartBeams = null, this._updateBeam = null, this._forward = null, c === MODEL_TYPES.DecoderOnly ? (this.can_generate = !0, this._runBeam = decoderRunBeam, this._getStartBeams = decoderStartBeams, this._updateBeam = decoderUpdatebeam, this._forward = decoderForward) : c === MODEL_TYPES.Seq2Seq || c === MODEL_TYPES.Vision2Seq ? (this.can_generate = !0, this._runBeam = seq2seqRunBeam, this._getStartBeams = seq2seqStartBeams, this._updateBeam = seq2seqUpdatebeam, this._forward = seq2seqForward) : c === MODEL_TYPES.EncoderDecoder ? 
this._forward = encoderForward : this._forward = encoderForward } async dispose() { const n = []; for (let a of Object.keys(this)) { const u = this[a]; u instanceof InferenceSession && n.push(u.handler.dispose()) } return await Promise.all(n) } static async from_pretrained(n, { quantized: a = !0, progress_callback: u = null, config: c = null, cache_dir: f = null, local_files_only: s = !1, revision: h = "main", model_file_name: p = null } = {}) { let l = { quantized: a, progress_callback: u, config: c, cache_dir: f, local_files_only: s, revision: h, model_file_name: p }; const o = MODEL_CLASS_TO_NAME_MAPPING.get(this), t = MODEL_TYPE_MAPPING.get(o); let e; return t === MODEL_TYPES.DecoderOnly ? e = await Promise.all([AutoConfig.from_pretrained(n, l), constructSession(n, l.model_file_name ?? "decoder_model_merged", l), getModelJSON(n, "generation_config.json", !1, l)]) : t === MODEL_TYPES.Seq2Seq || t === MODEL_TYPES.Vision2Seq ? e = await Promise.all([AutoConfig.from_pretrained(n, l), constructSession(n, "encoder_model", l), constructSession(n, "decoder_model_merged", l), getModelJSON(n, "generation_config.json", !1, l)]) : t === MODEL_TYPES.MaskGeneration ? e = await Promise.all([AutoConfig.from_pretrained(n, l), constructSession(n, "vision_encoder", l), constructSession(n, "prompt_encoder_mask_decoder", l)]) : t === MODEL_TYPES.EncoderDecoder ? e = await Promise.all([AutoConfig.from_pretrained(n, l), constructSession(n, "encoder_model", l), constructSession(n, "decoder_model_merged", l)]) : (t !== MODEL_TYPES.EncoderOnly && console.warn(`Model type for '${o??c?.model_type}' not found, assuming encoder-only architecture. Please report this at https://github.com/xenova/transformers.js/issues/new/choose.`), e = await Promise.all([AutoConfig.from_pretrained(n, l), constructSession(n, l.model_file_name ?? "model", l)])), new this(...e) } async _call(n) { return await this.forward(n) } async forward(n) { return await this._forward(this, n) } _get_logits_processor(n, a, u = null) { const c = new LogitsProcessorList; if (n.repetition_penalty !== null && n.repetition_penalty !== 1 && c.push(new RepetitionPenaltyLogitsProcessor(n.repetition_penalty)), n.no_repeat_ngram_size !== null && n.no_repeat_ngram_size > 0 && c.push(new NoRepeatNGramLogitsProcessor(n.no_repeat_ngram_size)), n.bad_words_ids !== null && c.push(new NoBadWordsLogitsProcessor(n.bad_words_ids, n.eos_token_id)), n.min_length !== null && n.eos_token_id !== null && n.min_length > 0 && c.push(new MinLengthLogitsProcessor(n.min_length, n.eos_token_id)), n.min_new_tokens !== null && n.eos_token_id !== null && n.min_new_tokens > 0 && c.push(new MinNewTokensLengthLogitsProcessor(a, n.min_new_tokens, n.eos_token_id)), n.forced_bos_token_id !== null && c.push(new ForcedBOSTokenLogitsProcessor(n.forced_bos_token_id)), n.forced_eos_token_id !== null && c.push(new ForcedEOSTokenLogitsProcessor(n.max_length, n.forced_eos_token_id)), n.begin_suppress_tokens !== null) { let f = a > 1 || n.forced_bos_token_id === null ? 
a : a + 1; n.forced_decoder_ids !== null && (f += n.forced_decoder_ids[n.forced_decoder_ids.length - 1][0]), c.push(new SuppressTokensAtBeginLogitsProcessor(n.begin_suppress_tokens, f)) } return n.forced_decoder_ids !== null && c.push(new ForceTokensLogitsProcessor(n.forced_decoder_ids)), u !== null && c.extend(u), c } _get_generation_config(n) { let a = new GenerationConfig(this.config); return "generation_config" in this && Object.assign(a, this.generation_config), n !== null && Object.assign(a, n), a } async generate(n, a = null, u = null, { inputs_attention_mask: c = null } = {}) { if (!this.can_generate) { let g = `The current model class (${MODEL_CLASS_TO_NAME_MAPPING.get(this.constructor)}) is not compatible with \`.generate()\`, as it doesn't have a language model head.`; const m = this.config.model_type, _ = MODEL_WITH_LM_HEAD_MAPPING_NAMES.get(m) ?? MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES.get(m) ?? MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES.get(m) ?? MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES.get(m); throw _ && (g += ` Please use the following class instead: '${_[0]}'`), Error(g) } if (!(n instanceof Tensor) && !isTypedArray(n) && !Array.isArray(n)) throw Error(`\`inputs\` must be a Tensor, TypedArray, or Array, but is "${n.constructor.name}".`); let f; if (this.config.is_encoder_decoder) f = 0; else if (f = n instanceof Tensor ? n.dims.at(-1) : n.length, f === 0) throw Error("Must supply a non-empty array of input token ids."); a = this._get_generation_config(a), u = u ?? new LogitsProcessorList, u = this._get_logits_processor(a, f, u); let s = a.eos_token_id; s !== null && !Array.isArray(s) && (s = [s]); let h = 1; const p = h + (a.max_new_tokens ?? 1 / 0), l = Number.isInteger(a.max_length) && (a.max_new_tokens ?? null) === null; let o = Sampler.getSampler(a), t = this.getStartBeams(n, a, h, c); for (; t.some(d => !d.done) && h < p;) { let d = []; for (let g of t) { if (g.done) { d.push(g); continue } if (l && g.output_token_ids.length >= a.max_length) { g.done = !0, d.push(g); continue } let m = await this.runBeam(g); a.output_attentions && this.addAttentionsToBeam(g, m), a.output_scores; let _ = m.logits.slice(null, -1, null); u(g.output_token_ids, _); let y = o(_); for (let [T, w] of y) { let S = { ...g }; this.updateBeam(S, T), S.score += w, s && s.includes(T) && (S.done = !0), d.push(S) } }++h, d = this.groupBeams(d).map(g => g.sort((m, _) => _.score - m.score).slice(0, a.num_beams)), t = d.flat(), a.callback_function && a.callback_function(t) } const e = this.groupBeams(t), r = d => e.map(g => a.num_return_sequences > 1 ? g.slice(0, a.num_return_sequences).map(m => m[d]) : [g[0][d]]).flat(), i = r("output_token_ids"); if (a.return_dict_in_generate) { const d = r("decoder_attentions"), g = r("cross_attentions"); return { sequences: i, decoder_attentions: d, cross_attentions: g } } else return i } addAttentionsToBeam(n, a) { if (this.config.is_encoder_decoder) { if (!a.cross_attentions || a.cross_attentions.length === 0) throw Error("`output_attentions` is true, but the model did not produce cross-attentions. This is most likely because the model was not exported with `output_attentions=True`."); n.cross_attentions || (n.cross_attentions = []), n.cross_attentions.push(a.cross_attentions) } if (!a.decoder_attentions || a.decoder_attentions.length === 0) throw Error("`output_attentions` is true, but the model did not produce decoder-attentions. 
This is most likely because the model was not exported with `output_attentions=True`."); n.decoder_attentions || (n.decoder_attentions = []), n.decoder_attentions.push(a.decoder_attentions) } groupBeams(n) { const a = Object.create(null); for (const u of n) a[u.id] === void 0 ? a[u.id] = [u] : a[u.id].push(u); return Object.values(a) } getPastKeyValues(n, a) { const u = Object.create(null); for (const c in n) if (c.startsWith("present")) { let f = c.replace("present", "past_key_values"); a && c.includes("encoder") ? u[f] = a[f] : u[f] = n[c] } return u } getAttentions(n) { const a = Object.create(null); for (const u of ["cross_attentions", "decoder_attentions"]) { const c = []; for (const f in n) if (f.startsWith(u)) { const s = f.split(".").pop(); c[s] = n[f] } a[u] = c } return a } addPastKeyValues(n, a) { if (a) Object.assign(n, a); else if (this.config.is_encoder_decoder && (this.add_encoder_pkv ?? !0)) { let c = [1, this.num_encoder_heads, 0, this.encoder_dim_kv], f = [1, this.num_decoder_heads, 0, this.decoder_dim_kv]; for (let s = 0; s < this.num_decoder_layers; ++s) n[`past_key_values.${s}.encoder.key`] = new Tensor("float32", [], c), n[`past_key_values.${s}.encoder.value`] = new Tensor("float32", [], c), n[`past_key_values.${s}.decoder.key`] = new Tensor("float32", [], f), n[`past_key_values.${s}.decoder.value`] = new Tensor("float32", [], f) } else if (this.config.model_type === "falcon") { let c = [1 * this.num_heads, 0, this.dim_kv]; for (let f = 0; f < this.num_layers; ++f) n[`past_key_values.${f}.key`] = new Tensor("float32", [], c), n[`past_key_values.${f}.value`] = new Tensor("float32", [], c) } else if (this.config.multi_query) { let c = [1 * this.num_heads, 0, 2 * this.dim_kv]; for (let f = 0; f < this.num_layers; ++f) n[`past_key_values.${f}.key_value`] = new Tensor("float32", [], c) } else if (this.config.model_type === "bloom") { let c = [1 * this.num_heads, this.dim_kv, 0], f = [1 * this.num_heads, 0, this.dim_kv]; for (let s = 0; s < this.num_layers; ++s) n[`past_key_values.${s}.key`] = new Tensor("float32", [], c), n[`past_key_values.${s}.value`] = new Tensor("float32", [], f) } else { let c = [1, this.num_heads, 0, this.dim_kv]; for (let f = 0; f < this.num_layers; ++f) n[`past_key_values.${f}.key`] = new Tensor("float32", [], c), n[`past_key_values.${f}.value`] = new Tensor("float32", [], c) } } getStartBeams(n, a, u, c) { return this._getStartBeams(this, n, a, u, c) } async runBeam(n) { return await this._runBeam(this, n) } updateBeam(n, a) { return this._updateBeam(n, a) } } class ModelOutput {} class BertPreTrainedModel extends PreTrainedModel {} class BertModel extends BertPreTrainedModel {} class BertForMaskedLM extends BertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class BertForSequenceClassification extends BertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class BertForTokenClassification extends BertPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class BertForQuestionAnswering extends BertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class RoFormerPreTrainedModel extends PreTrainedModel {} class RoFormerModel extends RoFormerPreTrainedModel {} class RoFormerForMaskedLM extends RoFormerPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class RoFormerForSequenceClassification extends RoFormerPreTrainedModel { async _call(n) { 
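/*
 * Illustrative sketch (comment only): the decoding loop implemented by generate()
 * above, seen from the caller's side. inputIdsTensor stands for an already
 * tokenized prompt; the options mirror GenerationConfig fields the loop reads.
 *
 *   const tokenIds = await model.generate(inputIdsTensor, {
 *     max_new_tokens: 32,
 *     num_beams: 1,                       // beams are grouped per input row by groupBeams
 *     callback_function: (beams) => {},   // invoked after every decoding step
 *   });
 *
 * Between steps, getPastKeyValues copies every "present.*" session output into a
 * "past_key_values.*" feed, and addPastKeyValues seeds empty tensors on the first
 * step (e.g. shape [1, num_heads, 0, dim_kv] for a plain decoder) so the ONNX
 * session always receives its full set of cache inputs.
 */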
return new SequenceClassifierOutput(await super._call(n)) } } class RoFormerForTokenClassification extends RoFormerPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class RoFormerForQuestionAnswering extends RoFormerPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class ConvBertPreTrainedModel extends PreTrainedModel {} class ConvBertModel extends ConvBertPreTrainedModel {} class ConvBertForMaskedLM extends ConvBertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class ConvBertForSequenceClassification extends ConvBertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class ConvBertForTokenClassification extends ConvBertPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class ConvBertForQuestionAnswering extends ConvBertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class ElectraPreTrainedModel extends PreTrainedModel {} class ElectraModel extends ElectraPreTrainedModel {} class ElectraForMaskedLM extends ElectraPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class ElectraForSequenceClassification extends ElectraPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class ElectraForTokenClassification extends ElectraPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class ElectraForQuestionAnswering extends ElectraPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class CamembertPreTrainedModel extends PreTrainedModel {} class CamembertModel extends CamembertPreTrainedModel {} class CamembertForMaskedLM extends CamembertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class CamembertForSequenceClassification extends CamembertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class CamembertForTokenClassification extends CamembertPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class CamembertForQuestionAnswering extends CamembertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class DebertaPreTrainedModel extends PreTrainedModel {} class DebertaModel extends DebertaPreTrainedModel {} class DebertaForMaskedLM extends DebertaPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class DebertaForSequenceClassification extends DebertaPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class DebertaForTokenClassification extends DebertaPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class DebertaForQuestionAnswering extends DebertaPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class DebertaV2PreTrainedModel extends PreTrainedModel {} class DebertaV2Model extends DebertaV2PreTrainedModel {} class DebertaV2ForMaskedLM extends DebertaV2PreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class DebertaV2ForSequenceClassification extends DebertaV2PreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class 
DebertaV2ForTokenClassification extends DebertaV2PreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class DebertaV2ForQuestionAnswering extends DebertaV2PreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class DistilBertPreTrainedModel extends PreTrainedModel {} class DistilBertModel extends DistilBertPreTrainedModel {} class DistilBertForSequenceClassification extends DistilBertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class DistilBertForTokenClassification extends DistilBertPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class DistilBertForMaskedLM extends DistilBertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class EsmPreTrainedModel extends PreTrainedModel {} class EsmModel extends EsmPreTrainedModel {} class EsmForMaskedLM extends EsmPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class EsmForSequenceClassification extends EsmPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class EsmForTokenClassification extends EsmPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class MobileBertPreTrainedModel extends PreTrainedModel {} class MobileBertModel extends MobileBertPreTrainedModel {} class MobileBertForMaskedLM extends MobileBertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class MobileBertForSequenceClassification extends MobileBertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class MPNetPreTrainedModel extends PreTrainedModel {} class MPNetModel extends MPNetPreTrainedModel {} class MPNetForMaskedLM extends MPNetPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class MPNetForSequenceClassification extends MPNetPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class MPNetForTokenClassification extends MPNetPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class MPNetForQuestionAnswering extends MPNetPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class SqueezeBertPreTrainedModel extends PreTrainedModel {} class SqueezeBertModel extends SqueezeBertPreTrainedModel {} class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class AlbertPreTrainedModel extends PreTrainedModel {} class AlbertModel extends AlbertPreTrainedModel {} class AlbertForSequenceClassification extends AlbertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await 
super._call(n)) } } class AlbertForQuestionAnswering extends AlbertPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class AlbertForMaskedLM extends AlbertPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class T5PreTrainedModel extends PreTrainedModel {} class T5Model extends T5PreTrainedModel {} class T5ForConditionalGeneration extends T5PreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.num_decoder_layers, this.num_decoder_heads = this.config.num_heads, this.decoder_dim_kv = this.config.d_kv, this.num_encoder_layers = this.config.num_layers, this.num_encoder_heads = this.config.num_heads, this.encoder_dim_kv = this.config.d_kv } } class LongT5PreTrainedModel extends PreTrainedModel {} class LongT5Model extends LongT5PreTrainedModel {} class LongT5ForConditionalGeneration extends LongT5PreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.num_decoder_layers, this.num_decoder_heads = this.config.num_heads, this.decoder_dim_kv = this.config.d_kv, this.num_encoder_layers = this.config.num_layers, this.num_encoder_heads = this.config.num_heads, this.encoder_dim_kv = this.config.d_kv } } class MT5PreTrainedModel extends PreTrainedModel {} class MT5Model extends MT5PreTrainedModel {} class MT5ForConditionalGeneration extends MT5PreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.num_decoder_layers, this.num_decoder_heads = this.config.num_heads, this.decoder_dim_kv = this.config.d_kv, this.num_encoder_layers = this.config.num_layers, this.num_encoder_heads = this.config.num_heads, this.encoder_dim_kv = this.config.d_kv } } class BartPretrainedModel extends PreTrainedModel {} class BartModel extends BartPretrainedModel {} class BartForConditionalGeneration extends BartPretrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class BartForSequenceClassification extends BartPretrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class MBartPreTrainedModel extends PreTrainedModel {} class MBartModel extends MBartPreTrainedModel {} class MBartForConditionalGeneration extends MBartPreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class MBartForSequenceClassification extends MBartPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class MBartForCausalLM extends 
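/*
 * Illustrative note (comment only): the seq2seq wrappers above derive the
 * key/value-cache geometry from their configs. T5-style configs expose d_kv
 * directly, while BART-style configs only expose d_model, so the per-head size
 * is recovered by division. Hypothetical numbers:
 *
 *   // BART-style: d_model = 768, decoder_attention_heads = 12  ->  decoder_dim_kv = 768 / 12 = 64
 *   // T5-style:   d_kv = 64 is used as-is
 *
 * These fields feed addPastKeyValues, which allocates the empty
 * past_key_values.N.{encoder,decoder}.{key,value} tensors of shape
 * [1, num_heads, 0, dim_kv] before the first decoder step.
 */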
MBartPreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class BlenderbotPreTrainedModel extends PreTrainedModel {} class BlenderbotModel extends BlenderbotPreTrainedModel {} class BlenderbotForConditionalGeneration extends BlenderbotPreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class BlenderbotSmallPreTrainedModel extends PreTrainedModel {} class BlenderbotSmallModel extends BlenderbotSmallPreTrainedModel {} class BlenderbotSmallForConditionalGeneration extends BlenderbotSmallPreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class RobertaPreTrainedModel extends PreTrainedModel {} class RobertaModel extends RobertaPreTrainedModel {} class RobertaForMaskedLM extends RobertaPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class RobertaForSequenceClassification extends RobertaPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class RobertaForTokenClassification extends RobertaPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class RobertaForQuestionAnswering extends RobertaPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class XLMPreTrainedModel extends PreTrainedModel {} class XLMModel extends XLMPreTrainedModel {} class XLMWithLMHeadModel extends XLMPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class XLMForSequenceClassification extends XLMPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class XLMForTokenClassification extends XLMPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class XLMForQuestionAnswering extends XLMPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class XLMRobertaPreTrainedModel extends PreTrainedModel {} class XLMRobertaModel extends XLMRobertaPreTrainedModel {} class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel { async _call(n) { return new MaskedLMOutput(await super._call(n)) } } class XLMRobertaForSequenceClassification extends XLMRobertaPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class 
XLMRobertaForTokenClassification extends XLMRobertaPreTrainedModel { async _call(n) { return new TokenClassifierOutput(await super._call(n)) } } class XLMRobertaForQuestionAnswering extends XLMRobertaPreTrainedModel { async _call(n) { return new QuestionAnsweringModelOutput(await super._call(n)) } } class ASTPreTrainedModel extends PreTrainedModel {} class ASTModel extends ASTPreTrainedModel {} class ASTForAudioClassification extends ASTPreTrainedModel {} class WhisperPreTrainedModel extends PreTrainedModel {} class WhisperModel extends WhisperPreTrainedModel {} class WhisperForConditionalGeneration extends WhisperPreTrainedModel { requires_attention_mask = !1; main_input_name = "input_features"; constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } async generate(n, a = null, u = null) { if (a = this._get_generation_config(a), a.return_timestamps ??= !1, a.return_timestamps && (u = [new WhisperTimeStampLogitsProcessor(a)]), a.return_token_timestamps && (a.output_attentions = !0, a.return_dict_in_generate = !0, a.task === "translate" && console.warn("Token-level timestamps may not be reliable for task 'translate'."), !a.alignment_heads)) throw new Error("Model generation config has no `alignment_heads`, token-level timestamps not available. See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config."); const c = await super.generate(n, a, u); return a.return_token_timestamps && a.alignment_heads && (c.token_timestamps = this._extract_token_timestamps(c, a.alignment_heads, a.num_frames)), c } _extract_token_timestamps(n, a, u = null, c = .02) { if (!n.cross_attentions) throw new Error("Model outputs must contain cross attentions to extract timestamps. This is most likely because the model was not exported with `output_attentions=True`."); let f = this.config.median_filter_width; f === void 0 && (console.warn("Model config has no `median_filter_width`, using default value of 7."), f = 7); const s = n.cross_attentions.map(l => { let o = Array.from({ length: this.config.decoder_layers }, (g, m) => cat(l.map(_ => _[m]), 2)), t = stack(a.map(([g, m]) => u ? 
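/*
 * Illustrative sketch (comment only): the two timestamp modes handled by
 * WhisperForConditionalGeneration.generate above. inputFeatures stands for a
 * log-mel spectrogram tensor produced elsewhere.
 *
 *   // Segment-level timestamps: adds a WhisperTimeStampLogitsProcessor.
 *   const out = await whisperModel.generate(inputFeatures, { return_timestamps: true });
 *
 *   // Token-level timestamps: requires cross-attentions and alignment_heads in
 *   // the generation config; the result gains a token_timestamps tensor computed
 *   // by _extract_token_timestamps (median filter + dynamic time warping below).
 *   const out2 = await whisperModel.generate(inputFeatures, { return_token_timestamps: true });
 */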
o[g].slice(null, m, null, [0, u]) : o[g].slice(null, m))); t = t.transpose(1, 0, 2, 3); let [e, r] = std_mean(t, -2, 0, !0), i = t.clone(); for (let g = 0; g < i.dims[0]; ++g) { let m = i[g]; for (let _ = 0; _ < m.dims[0]; ++_) { let y = m[_]; const T = e[g][_][0], w = r[g][_][0]; for (let S = 0; S < y.dims[0]; ++S) { let O = y[S]; for (let E = 0; E < O.data.length; ++E) O.data[E] = (O.data[E] - w.data[E]) / T.data[E]; O.data.set(medianFilter(O.data, f)) } } } return mean(i, 1) }), h = [n.sequences.length, n.sequences[0].length], p = new Tensor("float32", new Float32Array(h[0] * h[1]), h); for (let l = 0; l < h[0]; ++l) { const o = s[l].neg().squeeze_(0); let [t, e] = dynamicTimeWarping(o), r = Array.from({ length: t.length - 1 }, (g, m) => t[m + 1] - t[m]), i = mergeArrays([1], r).map(g => !!g), d = []; for (let g = 0; g < i.length; ++g) i[g] && d.push(e[g] * c); p[l].data.set(d, 1) } return p } } class VisionEncoderDecoderModel extends PreTrainedModel { main_input_name = "pixel_values"; constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c; const f = this.config.encoder, s = this.config.decoder, h = f.model_type; (MODEL_MAPPING_NAMES_ENCODER_ONLY.get(h) ?? MODEL_MAPPING_NAMES_ENCODER_DECODER.get(h)) || console.warn(`Model type for encoder '${h}' not found, assuming encoder-only architecture. Please report this at https://github.com/xenova/transformers.js/issues/new/choose.`); const l = MODEL_WITH_LM_HEAD_MAPPING_NAMES.get(s.model_type); if (!l) throw new Error(`Unable to construct \`VisionEncoderDecoder\` due to unsupported decoder: "${this.config.decoder.model_type}"`); const o = l[1], t = new o(s, u, c); this.add_encoder_pkv = "num_decoder_layers" in t, this.add_encoder_pkv ? (this.num_decoder_layers = t.num_decoder_layers, this.num_decoder_heads = t.num_decoder_heads, this.decoder_dim_kv = t.decoder_dim_kv, this.num_encoder_layers = t.num_encoder_layers, this.num_encoder_heads = t.num_encoder_heads, this.encoder_dim_kv = t.encoder_dim_kv) : (this.num_layers = t.num_layers, this.num_heads = t.num_heads, this.dim_kv = t.dim_kv) } } class CLIPPreTrainedModel extends PreTrainedModel {} class CLIPModel extends CLIPPreTrainedModel {} class CLIPTextModelWithProjection extends CLIPPreTrainedModel { static async from_pretrained(n, a = {}) { return a.model_file_name ??= "text_model", super.from_pretrained(n, a) } } class CLIPVisionModelWithProjection extends CLIPPreTrainedModel { static async from_pretrained(n, a = {}) { return a.model_file_name ??= "vision_model", super.from_pretrained(n, a) } } class SiglipPreTrainedModel extends PreTrainedModel {} class SiglipModel extends SiglipPreTrainedModel {} class SiglipTextModel extends SiglipPreTrainedModel { static async from_pretrained(n, a = {}) { return a.model_file_name ??= "text_model", super.from_pretrained(n, a) } } class SiglipVisionModel extends CLIPPreTrainedModel { static async from_pretrained(n, a = {}) { return a.model_file_name ??= "vision_model", super.from_pretrained(n, a) } } class ChineseCLIPPreTrainedModel extends PreTrainedModel {} class ChineseCLIPModel extends ChineseCLIPPreTrainedModel {} class CLIPSegPreTrainedModel extends PreTrainedModel {} class CLIPSegModel extends CLIPSegPreTrainedModel {} class CLIPSegForImageSegmentation extends CLIPSegPreTrainedModel {} class GPT2PreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.n_head, this.num_layers = 
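/*
 * Illustrative sketch (comment only): the projection-only CLIP / SigLIP classes
 * above just default model_file_name before delegating to the shared
 * from_pretrained, so the text and vision towers can be loaded as separate ONNX
 * graphs from the same repository. The model id is a placeholder.
 *
 *   const textModel = await CLIPTextModelWithProjection.from_pretrained('<clip-model-id>');
 *   // -> options.model_file_name ??= 'text_model'
 *   const visionModel = await CLIPVisionModelWithProjection.from_pretrained('<clip-model-id>');
 *   // -> options.model_file_name ??= 'vision_model'
 */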
this.config.n_layer, this.dim_kv = this.config.n_embd / this.num_heads } } class GPT2Model extends GPT2PreTrainedModel {} class GPT2LMHeadModel extends GPT2PreTrainedModel {} class GPTNeoPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_heads, this.num_layers = this.config.num_layers, this.dim_kv = this.config.hidden_size / this.num_heads } } class GPTNeoModel extends GPTNeoPreTrainedModel {} class GPTNeoForCausalLM extends GPTNeoPreTrainedModel {} class GPTNeoXPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_attention_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.num_heads } } class GPTNeoXModel extends GPTNeoXPreTrainedModel {} class GPTNeoXForCausalLM extends GPTNeoXPreTrainedModel {} class GPTJPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.n_head, this.num_layers = this.config.n_layer, this.dim_kv = this.config.n_embd / this.num_heads } } class GPTJModel extends GPTJPreTrainedModel {} class GPTJForCausalLM extends GPTJPreTrainedModel {} class GPTBigCodePreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.n_head, this.num_layers = this.config.n_layer, this.dim_kv = this.config.n_embd / this.num_heads } } class GPTBigCodeModel extends GPTBigCodePreTrainedModel {} class GPTBigCodeForCausalLM extends GPTBigCodePreTrainedModel {} class CodeGenPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.n_head, this.num_layers = this.config.n_layer, this.dim_kv = this.config.n_embd / this.num_heads } } class CodeGenModel extends CodeGenPreTrainedModel {} class CodeGenForCausalLM extends CodeGenPreTrainedModel {} class LlamaPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_key_value_heads ?? this.config.num_attention_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.config.num_attention_heads } } class LlamaModel extends LlamaPreTrainedModel {} class LlamaForCausalLM extends LlamaPreTrainedModel {} class Qwen2PreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_key_value_heads ?? 
this.config.num_attention_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.config.num_attention_heads } } class Qwen2Model extends Qwen2PreTrainedModel {} class Qwen2ForCausalLM extends Qwen2PreTrainedModel {} class PhiPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_attention_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.num_heads } } class PhiModel extends PhiPreTrainedModel {} class PhiForCausalLM extends PhiPreTrainedModel {} class BloomPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.n_head, this.num_layers = this.config.n_layer, this.dim_kv = this.config.hidden_size / this.num_heads } } class BloomModel extends BloomPreTrainedModel {} class BloomForCausalLM extends BloomPreTrainedModel {} class MptPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.n_heads, this.num_layers = this.config.n_layers, this.dim_kv = this.config.d_model / this.num_heads } } class MptModel extends MptPreTrainedModel {} class MptForCausalLM extends MptPreTrainedModel {} class OPTPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_attention_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.num_heads } } class OPTModel extends OPTPreTrainedModel {} class OPTForCausalLM extends OPTPreTrainedModel {} class ViTPreTrainedModel extends PreTrainedModel {} class ViTModel extends ViTPreTrainedModel {} class ViTForImageClassification extends ViTPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class VitMattePreTrainedModel extends PreTrainedModel {} class VitMatteForImageMatting extends VitMattePreTrainedModel { async _call(n) { return new ImageMattingOutput(await super._call(n)) } } class MobileViTPreTrainedModel extends PreTrainedModel {} class MobileViTModel extends MobileViTPreTrainedModel {} class MobileViTForImageClassification extends MobileViTPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class OwlViTPreTrainedModel extends PreTrainedModel {} class OwlViTModel extends OwlViTPreTrainedModel {} class OwlViTForObjectDetection extends OwlViTPreTrainedModel {} class BeitPreTrainedModel extends PreTrainedModel {} class BeitModel extends BeitPreTrainedModel {} class BeitForImageClassification extends BeitPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class DetrPreTrainedModel extends PreTrainedModel {} class DetrModel extends DetrPreTrainedModel {} class DetrForObjectDetection extends DetrPreTrainedModel { async _call(n) { return new DetrObjectDetectionOutput(await super._call(n)) } } class DetrForSegmentation extends DetrPreTrainedModel { async _call(n) { return new DetrSegmentationOutput(await super._call(n)) } } class DetrObjectDetectionOutput extends ModelOutput { constructor({ logits: n, pred_boxes: a }) { super(), this.logits = n, this.pred_boxes = a } } class 
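/*
 * Illustrative note (comment only): the decoder-only wrappers above normalise
 * architecture-specific config keys onto the three fields addPastKeyValues
 * needs (num_heads, num_layers, dim_kv), and they also copy eos_token_id into
 * pad_token_id. Hypothetical numbers:
 *
 *   // GPT-2 style: n_head = 12, n_layer = 12, n_embd = 768
 *   //   -> num_heads = 12, num_layers = 12, dim_kv = 768 / 12 = 64
 *   // LLaMA style: num_key_value_heads = 8, num_attention_heads = 32, hidden_size = 4096
 *   //   -> num_heads = 8 (the KV heads), dim_kv = 4096 / 32 = 128
 */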
DetrSegmentationOutput extends ModelOutput { constructor({ logits: n, pred_boxes: a, pred_masks: u }) { super(), this.logits = n, this.pred_boxes = a, this.pred_masks = u } } class TableTransformerPreTrainedModel extends PreTrainedModel {} class TableTransformerModel extends TableTransformerPreTrainedModel {} class TableTransformerForObjectDetection extends TableTransformerPreTrainedModel { async _call(n) { return new TableTransformerObjectDetectionOutput(await super._call(n)) } } class TableTransformerObjectDetectionOutput extends DetrObjectDetectionOutput {} class DeiTPreTrainedModel extends PreTrainedModel {} class DeiTModel extends DeiTPreTrainedModel {} class DeiTForImageClassification extends DeiTPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class ResNetPreTrainedModel extends PreTrainedModel {} class ResNetModel extends ResNetPreTrainedModel {} class ResNetForImageClassification extends ResNetPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class SwinPreTrainedModel extends PreTrainedModel {} class SwinModel extends SwinPreTrainedModel {} class SwinForImageClassification extends SwinPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class Swin2SRPreTrainedModel extends PreTrainedModel {} class Swin2SRModel extends Swin2SRPreTrainedModel {} class Swin2SRForImageSuperResolution extends Swin2SRPreTrainedModel {} class DPTPreTrainedModel extends PreTrainedModel {} class DPTModel extends DPTPreTrainedModel {} class DPTForDepthEstimation extends DPTPreTrainedModel {} class DepthAnythingPreTrainedModel extends PreTrainedModel {} class DepthAnythingForDepthEstimation extends DepthAnythingPreTrainedModel {} class GLPNPreTrainedModel extends PreTrainedModel {} class GLPNModel extends GLPNPreTrainedModel {} class GLPNForDepthEstimation extends GLPNPreTrainedModel {} class DonutSwinPreTrainedModel extends PreTrainedModel {} class DonutSwinModel extends DonutSwinPreTrainedModel {} class ConvNextPreTrainedModel extends PreTrainedModel {} class ConvNextModel extends ConvNextPreTrainedModel {} class ConvNextForImageClassification extends ConvNextPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class ConvNextV2PreTrainedModel extends PreTrainedModel {} class ConvNextV2Model extends ConvNextV2PreTrainedModel {} class ConvNextV2ForImageClassification extends ConvNextV2PreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class Dinov2PreTrainedModel extends PreTrainedModel {} class Dinov2Model extends Dinov2PreTrainedModel {} class Dinov2ForImageClassification extends Dinov2PreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class YolosPreTrainedModel extends PreTrainedModel {} class YolosModel extends YolosPreTrainedModel {} class YolosForObjectDetection extends YolosPreTrainedModel { async _call(n) { return new YolosObjectDetectionOutput(await super._call(n)) } } class YolosObjectDetectionOutput extends ModelOutput { constructor({ logits: n, pred_boxes: a }) { super(), this.logits = n, this.pred_boxes = a } } class SamPreTrainedModel extends PreTrainedModel {} class SamModel extends SamPreTrainedModel { constructor(n, a, u) { super(n, a), this.prompt_encoder_mask_decoder = u } async get_image_embeddings({ pixel_values: n }) { return await encoderForward(this, { pixel_values: n }) } async forward(n) { if ((!n.image_embeddings 
|| !n.image_positional_embeddings) && (n = { ...n, ...await this.get_image_embeddings(n) }), !n.input_labels) { const a = n.input_points.dims.slice(0, -1), u = a.reduce((c, f) => c * f, 1); n.input_labels = new Tensor("int64", new BigInt64Array(u).fill(1n), a) } return await sessionRun(this.prompt_encoder_mask_decoder, { input_points: n.input_points, input_labels: n.input_labels, image_embeddings: n.image_embeddings, image_positional_embeddings: n.image_positional_embeddings }) } async _call(n) { return new SamImageSegmentationOutput(await super._call(n)) } } class SamImageSegmentationOutput extends ModelOutput { constructor({ iou_scores: n, pred_masks: a }) { super(), this.iou_scores = n, this.pred_masks = a } } class MarianPreTrainedModel extends PreTrainedModel {} class MarianModel extends MarianPreTrainedModel {} class MarianMTModel extends MarianPreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class M2M100PreTrainedModel extends PreTrainedModel {} class M2M100Model extends M2M100PreTrainedModel {} class M2M100ForConditionalGeneration extends M2M100PreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.d_model / this.num_encoder_heads } } class Wav2Vec2PreTrainedModel extends PreTrainedModel {} class Wav2Vec2Model extends Wav2Vec2PreTrainedModel {} class Wav2Vec2ForCTC extends Wav2Vec2PreTrainedModel { async _call(n) { return new CausalLMOutput(await super._call(n)) } } class Wav2Vec2ForSequenceClassification extends Wav2Vec2PreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class Wav2Vec2BertPreTrainedModel extends PreTrainedModel {} class Wav2Vec2BertModel extends Wav2Vec2BertPreTrainedModel {} class Wav2Vec2BertForCTC extends Wav2Vec2BertPreTrainedModel { async _call(n) { return new CausalLMOutput(await super._call(n)) } } class Wav2Vec2BertForSequenceClassification extends Wav2Vec2BertPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class HubertModel extends Wav2Vec2PreTrainedModel {} class HubertForCTC extends Wav2Vec2PreTrainedModel { async _call(n) { return new CausalLMOutput(await super._call(n)) } } class HubertForSequenceClassification extends Wav2Vec2PreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class WavLMPreTrainedModel extends PreTrainedModel {} class WavLMModel extends WavLMPreTrainedModel {} class WavLMForCTC extends WavLMPreTrainedModel { async _call(n) { return new CausalLMOutput(await super._call(n)) } } class WavLMForSequenceClassification extends WavLMPreTrainedModel { async _call(n) { return new SequenceClassifierOutput(await super._call(n)) } } class SpeechT5PreTrainedModel extends PreTrainedModel {} 
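/*
 * Illustrative sketch (comment only): typical use of SamModel above. The image
 * encoder runs once per image via get_image_embeddings; the prompt encoder /
 * mask decoder session can then be re-run cheaply for each new point prompt.
 * Tensors are placeholders produced by an image processor elsewhere.
 *
 *   const embeddings = await samModel.get_image_embeddings({ pixel_values });
 *   const { iou_scores, pred_masks } = await samModel({
 *     ...embeddings,               // image_embeddings + image_positional_embeddings
 *     input_points: pointsTensor,  // point prompts; input_labels defaults to all 1n (foreground)
 *   });
 */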
class SpeechT5ForSpeechToText extends SpeechT5PreTrainedModel {} class SpeechT5ForTextToSpeech extends SpeechT5PreTrainedModel { constructor(n, a, u, c) { super(n, a), this.decoder_merged_session = u, this.generation_config = c, this.num_decoder_layers = this.config.decoder_layers, this.num_decoder_heads = this.config.decoder_attention_heads, this.decoder_dim_kv = this.config.hidden_size / this.num_decoder_heads, this.num_encoder_layers = this.config.encoder_layers, this.num_encoder_heads = this.config.encoder_attention_heads, this.encoder_dim_kv = this.config.hidden_size / this.num_encoder_heads } async generate_speech(n, a, { threshold: u = .5, minlenratio: c = 0, maxlenratio: f = 20, vocoder: s = null } = {}) { const h = { input_ids: n }, { encoder_outputs: p, encoder_attention_mask: l } = await encoderForward(this, h), o = p.dims[1] / this.config.reduction_factor, t = Math.floor(o * f), e = Math.floor(o * c), r = this.config.num_mel_bins; let i = [], d = null, g = null, m = 0; for (;;) { ++m; const T = boolTensor(!!g); let w; g ? w = g.output_sequence_out : w = new Tensor("float32", new Float32Array(r), [1, 1, r]); let S = { use_cache_branch: T, output_sequence: w, encoder_attention_mask: l, speaker_embeddings: a, encoder_hidden_states: p }; this.addPastKeyValues(S, d), g = await sessionRun(this.decoder_merged_session, S), d = this.getPastKeyValues(g, d); const { prob: O, spectrum: E } = g; if (i.push(E), m >= e && (Array.from(O.data).filter(v => v >= u).length > 0 || m >= t)) break } const _ = cat(i), { waveform: y } = await sessionRun(s.session, { spectrogram: _ }); return { spectrogram: _, waveform: y } } } class SpeechT5HifiGan extends PreTrainedModel { main_input_name = "spectrogram" } class TrOCRPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_encoder_layers = this.num_decoder_layers = this.config.decoder_layers, this.num_encoder_heads = this.num_decoder_heads = this.config.decoder_attention_heads, this.encoder_dim_kv = this.decoder_dim_kv = this.config.d_model / this.num_decoder_heads } } class TrOCRForCausalLM extends TrOCRPreTrainedModel {} class MistralPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_key_value_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.config.num_attention_heads } } class MistralModel extends MistralPreTrainedModel {} class MistralForCausalLM extends MistralPreTrainedModel {} class FalconPreTrainedModel extends PreTrainedModel { constructor(n, a, u) { super(n, a), this.generation_config = u, this.config.pad_token_id = this.config.eos_token_id, this.num_heads = this.config.num_attention_heads, this.num_layers = this.config.num_hidden_layers, this.dim_kv = this.config.hidden_size / this.config.num_attention_heads } } class FalconModel extends FalconPreTrainedModel {} class FalconForCausalLM extends FalconPreTrainedModel {} class ClapPreTrainedModel extends PreTrainedModel {} class ClapModel extends ClapPreTrainedModel {} class ClapTextModelWithProjection extends ClapPreTrainedModel { static async from_pretrained(n, a = {}) { return a.model_file_name ??= "text_model", super.from_pretrained(n, a) } } class ClapAudioModelWithProjection extends ClapPreTrainedModel { static async from_pretrained(n, a = {}) { return a.model_file_name ??= "audio_model", 
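/*
 * Illustrative sketch (comment only): driving generate_speech above. It expects
 * tokenized text, a speaker-embedding tensor and a vocoder (the SpeechT5HifiGan
 * wrapper defined nearby); it keeps decoding mel frames until the stop
 * probability crosses threshold or maxlenratio is reached, then vocodes the
 * concatenated spectrogram. Inputs are placeholders.
 *
 *   const { spectrogram, waveform } = await ttsModel.generate_speech(
 *     inputIdsTensor,            // tokenized text
 *     speakerEmbeddingsTensor,   // speaker x-vector (commonly shaped [1, 512])
 *     { vocoder: hifiganModel, threshold: 0.5, maxlenratio: 20 },
 *   );
 */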
super.from_pretrained(n, a) } } class VitsPreTrainedModel extends PreTrainedModel {} class VitsModel extends VitsPreTrainedModel { async _call(n) { return new VitsModelOutput(await super._call(n)) } } class SegformerPreTrainedModel extends PreTrainedModel {} class SegformerForImageClassification extends SegformerPreTrainedModel {} class SegformerForSemanticSegmentation extends SegformerPreTrainedModel {} class PretrainedMixin { static MODEL_CLASS_MAPPINGS = null; static BASE_IF_FAIL = !1; static async from_pretrained(n, { quantized: a = !0, progress_callback: u = null, config: c = null, cache_dir: f = null, local_files_only: s = !1, revision: h = "main", model_file_name: p = null } = {}) { let l = { quantized: a, progress_callback: u, config: c, cache_dir: f, local_files_only: s, revision: h, model_file_name: p }; if (c = await AutoConfig.from_pretrained(n, l), l.config || (l.config = c), !this.MODEL_CLASS_MAPPINGS) throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: " + this.name); for (let o of this.MODEL_CLASS_MAPPINGS) { const t = o.get(c.model_type); if (t) return await t[1].from_pretrained(n, l) } if (this.BASE_IF_FAIL) return console.warn(`Unknown model class "${c.model_type}", attempting to construct from base class.`), await PreTrainedModel.from_pretrained(n, l); throw Error(`Unsupported model type: ${c.model_type}`) } } const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([ ["bert", ["BertModel", BertModel]], ["roformer", ["RoFormerModel", RoFormerModel]], ["electra", ["ElectraModel", ElectraModel]], ["esm", ["EsmModel", EsmModel]], ["convbert", ["ConvBertModel", ConvBertModel]], ["camembert", ["CamembertModel", CamembertModel]], ["deberta", ["DebertaModel", DebertaModel]], ["deberta-v2", ["DebertaV2Model", DebertaV2Model]], ["mpnet", ["MPNetModel", MPNetModel]], ["albert", ["AlbertModel", AlbertModel]], ["distilbert", ["DistilBertModel", DistilBertModel]], ["roberta", ["RobertaModel", RobertaModel]], ["xlm", ["XLMModel", XLMModel]], ["xlm-roberta", ["XLMRobertaModel", XLMRobertaModel]], ["clap", ["ClapModel", ClapModel]], ["clip", ["CLIPModel", CLIPModel]], ["clipseg", ["CLIPSegModel", CLIPSegModel]], ["chinese_clip", ["ChineseCLIPModel", ChineseCLIPModel]], ["siglip", ["SiglipModel", SiglipModel]], ["mobilebert", ["MobileBertModel", MobileBertModel]], ["squeezebert", ["SqueezeBertModel", SqueezeBertModel]], ["wav2vec2", ["Wav2Vec2Model", Wav2Vec2Model]], ["wav2vec2-bert", ["Wav2Vec2BertModel", Wav2Vec2BertModel]], ["hubert", ["HubertModel", HubertModel]], ["wavlm", ["WavLMModel", WavLMModel]], ["audio-spectrogram-transformer", ["ASTModel", ASTModel]], ["vits", ["VitsModel", VitsModel]], ["detr", ["DetrModel", DetrModel]], ["table-transformer", ["TableTransformerModel", TableTransformerModel]], ["vit", ["ViTModel", ViTModel]], ["mobilevit", ["MobileViTModel", MobileViTModel]], ["owlvit", ["OwlViTModel", OwlViTModel]], ["beit", ["BeitModel", BeitModel]], ["deit", ["DeiTModel", DeiTModel]], ["convnext", ["ConvNextModel", ConvNextModel]], ["convnextv2", ["ConvNextV2Model", ConvNextV2Model]], ["dinov2", ["Dinov2Model", Dinov2Model]], ["resnet", ["ResNetModel", ResNetModel]], ["swin", ["SwinModel", SwinModel]], ["swin2sr", ["Swin2SRModel", Swin2SRModel]], ["donut-swin", ["DonutSwinModel", DonutSwinModel]], ["yolos", ["YolosModel", YolosModel]], ["dpt", ["DPTModel", DPTModel]], ["glpn", ["GLPNModel", GLPNModel]], ["hifigan", ["SpeechT5HifiGan", SpeechT5HifiGan]] ]), MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([ ["t5", ["T5Model", T5Model]], ["longt5", 
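/*
 * Illustrative sketch (comment only): how the Auto* resolution above works.
 * PretrainedMixin.from_pretrained loads the repository config first, then walks
 * its MODEL_CLASS_MAPPINGS looking up config.model_type; the first match wins,
 * and classes with BASE_IF_FAIL (such as AutoModel below) fall back to the
 * generic PreTrainedModel with a warning instead of throwing. Placeholder id.
 *
 *   // config.json says model_type: "bert"
 *   //   -> MODEL_MAPPING_NAMES_ENCODER_ONLY has ["bert", ["BertModel", BertModel]]
 *   //   -> BertModel.from_pretrained(...) is invoked
 *   const model = await AutoModel.from_pretrained('<bert-model-id>');
 */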
["LongT5Model", LongT5Model]], ["mt5", ["MT5Model", MT5Model]], ["bart", ["BartModel", BartModel]], ["mbart", ["MBartModel", MBartModel]], ["marian", ["MarianModel", MarianModel]], ["whisper", ["WhisperModel", WhisperModel]], ["m2m_100", ["M2M100Model", M2M100Model]], ["blenderbot", ["BlenderbotModel", BlenderbotModel]], ["blenderbot-small", ["BlenderbotSmallModel", BlenderbotSmallModel]] ]), MODEL_MAPPING_NAMES_DECODER_ONLY = new Map([ ["bloom", ["BloomModel", BloomModel]], ["gpt2", ["GPT2Model", GPT2Model]], ["gptj", ["GPTJModel", GPTJModel]], ["gpt_bigcode", ["GPTBigCodeModel", GPTBigCodeModel]], ["gpt_neo", ["GPTNeoModel", GPTNeoModel]], ["gpt_neox", ["GPTNeoXModel", GPTNeoXModel]], ["codegen", ["CodeGenModel", CodeGenModel]], ["llama", ["LlamaModel", LlamaModel]], ["qwen2", ["Qwen2Model", Qwen2Model]], ["phi", ["PhiModel", PhiModel]], ["mpt", ["MptModel", MptModel]], ["opt", ["OPTModel", OPTModel]], ["mistral", ["MistralModel", MistralModel]], ["falcon", ["FalconModel", FalconModel]] ]), MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = new Map([ ["speecht5", ["SpeechT5ForSpeechToText", SpeechT5ForSpeechToText]], ["whisper", ["WhisperForConditionalGeneration", WhisperForConditionalGeneration]] ]), MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = new Map([ ["speecht5", ["SpeechT5ForTextToSpeech", SpeechT5ForTextToSpeech]] ]), MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = new Map([ ["vits", ["VitsModel", VitsModel]] ]), MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = new Map([ ["bert", ["BertForSequenceClassification", BertForSequenceClassification]], ["roformer", ["RoFormerForSequenceClassification", RoFormerForSequenceClassification]], ["electra", ["ElectraForSequenceClassification", ElectraForSequenceClassification]], ["esm", ["EsmForSequenceClassification", EsmForSequenceClassification]], ["convbert", ["ConvBertForSequenceClassification", ConvBertForSequenceClassification]], ["camembert", ["CamembertForSequenceClassification", CamembertForSequenceClassification]], ["deberta", ["DebertaForSequenceClassification", DebertaForSequenceClassification]], ["deberta-v2", ["DebertaV2ForSequenceClassification", DebertaV2ForSequenceClassification]], ["mpnet", ["MPNetForSequenceClassification", MPNetForSequenceClassification]], ["albert", ["AlbertForSequenceClassification", AlbertForSequenceClassification]], ["distilbert", ["DistilBertForSequenceClassification", DistilBertForSequenceClassification]], ["roberta", ["RobertaForSequenceClassification", RobertaForSequenceClassification]], ["xlm", ["XLMForSequenceClassification", XLMForSequenceClassification]], ["xlm-roberta", ["XLMRobertaForSequenceClassification", XLMRobertaForSequenceClassification]], ["bart", ["BartForSequenceClassification", BartForSequenceClassification]], ["mbart", ["MBartForSequenceClassification", MBartForSequenceClassification]], ["mobilebert", ["MobileBertForSequenceClassification", MobileBertForSequenceClassification]], ["squeezebert", ["SqueezeBertForSequenceClassification", SqueezeBertForSequenceClassification]] ]), MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = new Map([ ["bert", ["BertForTokenClassification", BertForTokenClassification]], ["roformer", ["RoFormerForTokenClassification", RoFormerForTokenClassification]], ["electra", ["ElectraForTokenClassification", ElectraForTokenClassification]], ["esm", ["EsmForTokenClassification", EsmForTokenClassification]], ["convbert", ["ConvBertForTokenClassification", ConvBertForTokenClassification]], ["camembert", ["CamembertForTokenClassification", CamembertForTokenClassification]], 
["deberta", ["DebertaForTokenClassification", DebertaForTokenClassification]], ["deberta-v2", ["DebertaV2ForTokenClassification", DebertaV2ForTokenClassification]], ["mpnet", ["MPNetForTokenClassification", MPNetForTokenClassification]], ["distilbert", ["DistilBertForTokenClassification", DistilBertForTokenClassification]], ["roberta", ["RobertaForTokenClassification", RobertaForTokenClassification]], ["xlm", ["XLMForTokenClassification", XLMForTokenClassification]], ["xlm-roberta", ["XLMRobertaForTokenClassification", XLMRobertaForTokenClassification]] ]), MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = new Map([ ["t5", ["T5ForConditionalGeneration", T5ForConditionalGeneration]], ["longt5", ["LongT5ForConditionalGeneration", LongT5ForConditionalGeneration]], ["mt5", ["MT5ForConditionalGeneration", MT5ForConditionalGeneration]], ["bart", ["BartForConditionalGeneration", BartForConditionalGeneration]], ["mbart", ["MBartForConditionalGeneration", MBartForConditionalGeneration]], ["marian", ["MarianMTModel", MarianMTModel]], ["m2m_100", ["M2M100ForConditionalGeneration", M2M100ForConditionalGeneration]], ["blenderbot", ["BlenderbotForConditionalGeneration", BlenderbotForConditionalGeneration]], ["blenderbot-small", ["BlenderbotSmallForConditionalGeneration", BlenderbotSmallForConditionalGeneration]] ]), MODEL_WITH_LM_HEAD_MAPPING_NAMES = new Map([ ["bloom", ["BloomForCausalLM", BloomForCausalLM]], ["gpt2", ["GPT2LMHeadModel", GPT2LMHeadModel]], ["gptj", ["GPTJForCausalLM", GPTJForCausalLM]], ["gpt_bigcode", ["GPTBigCodeForCausalLM", GPTBigCodeForCausalLM]], ["gpt_neo", ["GPTNeoForCausalLM", GPTNeoForCausalLM]], ["gpt_neox", ["GPTNeoXForCausalLM", GPTNeoXForCausalLM]], ["codegen", ["CodeGenForCausalLM", CodeGenForCausalLM]], ["llama", ["LlamaForCausalLM", LlamaForCausalLM]], ["qwen2", ["Qwen2ForCausalLM", Qwen2ForCausalLM]], ["phi", ["PhiForCausalLM", PhiForCausalLM]], ["mpt", ["MptForCausalLM", MptForCausalLM]], ["opt", ["OPTForCausalLM", OPTForCausalLM]], ["mbart", ["MBartForCausalLM", MBartForCausalLM]], ["mistral", ["MistralForCausalLM", MistralForCausalLM]], ["falcon", ["FalconForCausalLM", FalconForCausalLM]], ["trocr", ["TrOCRForCausalLM", TrOCRForCausalLM]] ]), MODEL_FOR_MASKED_LM_MAPPING_NAMES = new Map([ ["bert", ["BertForMaskedLM", BertForMaskedLM]], ["roformer", ["RoFormerForMaskedLM", RoFormerForMaskedLM]], ["electra", ["ElectraForMaskedLM", ElectraForMaskedLM]], ["esm", ["EsmForMaskedLM", EsmForMaskedLM]], ["convbert", ["ConvBertForMaskedLM", ConvBertForMaskedLM]], ["camembert", ["CamembertForMaskedLM", CamembertForMaskedLM]], ["deberta", ["DebertaForMaskedLM", DebertaForMaskedLM]], ["deberta-v2", ["DebertaV2ForMaskedLM", DebertaV2ForMaskedLM]], ["mpnet", ["MPNetForMaskedLM", MPNetForMaskedLM]], ["albert", ["AlbertForMaskedLM", AlbertForMaskedLM]], ["distilbert", ["DistilBertForMaskedLM", DistilBertForMaskedLM]], ["roberta", ["RobertaForMaskedLM", RobertaForMaskedLM]], ["xlm", ["XLMWithLMHeadModel", XLMWithLMHeadModel]], ["xlm-roberta", ["XLMRobertaForMaskedLM", XLMRobertaForMaskedLM]], ["mobilebert", ["MobileBertForMaskedLM", MobileBertForMaskedLM]], ["squeezebert", ["SqueezeBertForMaskedLM", SqueezeBertForMaskedLM]] ]), MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = new Map([ ["bert", ["BertForQuestionAnswering", BertForQuestionAnswering]], ["roformer", ["RoFormerForQuestionAnswering", RoFormerForQuestionAnswering]], ["electra", ["ElectraForQuestionAnswering", ElectraForQuestionAnswering]], ["convbert", ["ConvBertForQuestionAnswering", ConvBertForQuestionAnswering]], ["camembert", 
["CamembertForQuestionAnswering", CamembertForQuestionAnswering]], ["deberta", ["DebertaForQuestionAnswering", DebertaForQuestionAnswering]], ["deberta-v2", ["DebertaV2ForQuestionAnswering", DebertaV2ForQuestionAnswering]], ["mpnet", ["MPNetForQuestionAnswering", MPNetForQuestionAnswering]], ["albert", ["AlbertForQuestionAnswering", AlbertForQuestionAnswering]], ["distilbert", ["DistilBertForQuestionAnswering", DistilBertForQuestionAnswering]], ["roberta", ["RobertaForQuestionAnswering", RobertaForQuestionAnswering]], ["xlm", ["XLMForQuestionAnswering", XLMForQuestionAnswering]], ["xlm-roberta", ["XLMRobertaForQuestionAnswering", XLMRobertaForQuestionAnswering]], ["mobilebert", ["MobileBertForQuestionAnswering", MobileBertForQuestionAnswering]], ["squeezebert", ["SqueezeBertForQuestionAnswering", SqueezeBertForQuestionAnswering]] ]), MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = new Map([ ["vision-encoder-decoder", ["VisionEncoderDecoderModel", VisionEncoderDecoderModel]] ]), MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = new Map([ ["vit", ["ViTForImageClassification", ViTForImageClassification]], ["mobilevit", ["MobileViTForImageClassification", MobileViTForImageClassification]], ["beit", ["BeitForImageClassification", BeitForImageClassification]], ["deit", ["DeiTForImageClassification", DeiTForImageClassification]], ["convnext", ["ConvNextForImageClassification", ConvNextForImageClassification]], ["convnextv2", ["ConvNextV2ForImageClassification", ConvNextV2ForImageClassification]], ["dinov2", ["Dinov2ForImageClassification", Dinov2ForImageClassification]], ["resnet", ["ResNetForImageClassification", ResNetForImageClassification]], ["swin", ["SwinForImageClassification", SwinForImageClassification]], ["segformer", ["SegformerForImageClassification", SegformerForImageClassification]] ]), MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = new Map([ ["detr", ["DetrForObjectDetection", DetrForObjectDetection]], ["table-transformer", ["TableTransformerForObjectDetection", TableTransformerForObjectDetection]], ["yolos", ["YolosForObjectDetection", YolosForObjectDetection]] ]), MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = new Map([ ["owlvit", ["OwlViTForObjectDetection", OwlViTForObjectDetection]] ]), MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = new Map([ ["detr", ["DetrForSegmentation", DetrForSegmentation]], ["clipseg", ["CLIPSegForImageSegmentation", CLIPSegForImageSegmentation]] ]), MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = new Map([ ["segformer", ["SegformerForSemanticSegmentation", SegformerForSemanticSegmentation]] ]), MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = new Map([ ["sam", ["SamModel", SamModel]] ]), MODEL_FOR_CTC_MAPPING_NAMES = new Map([ ["wav2vec2", ["Wav2Vec2ForCTC", Wav2Vec2ForCTC]], ["wav2vec2-bert", ["Wav2Vec2BertForCTC", Wav2Vec2BertForCTC]], ["wavlm", ["WavLMForCTC", WavLMForCTC]], ["hubert", ["HubertForCTC", HubertForCTC]] ]), MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = new Map([ ["wav2vec2", ["Wav2Vec2ForSequenceClassification", Wav2Vec2ForSequenceClassification]], ["wav2vec2-bert", ["Wav2Vec2BertForSequenceClassification", Wav2Vec2BertForSequenceClassification]], ["wavlm", ["WavLMForSequenceClassification", WavLMForSequenceClassification]], ["hubert", ["HubertForSequenceClassification", HubertForSequenceClassification]], ["audio-spectrogram-transformer", ["ASTForAudioClassification", ASTForAudioClassification]] ]), MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES = new Map([ ["vitmatte", ["VitMatteForImageMatting", VitMatteForImageMatting]] ]), 
MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = new Map([ ["swin2sr", ["Swin2SRForImageSuperResolution", Swin2SRForImageSuperResolution]] ]), MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = new Map([ ["dpt", ["DPTForDepthEstimation", DPTForDepthEstimation]], ["depth_anything", ["DepthAnythingForDepthEstimation", DepthAnythingForDepthEstimation]], ["glpn", ["GLPNForDepthEstimation", GLPNForDepthEstimation]] ]), MODEL_CLASS_TYPE_MAPPING = [ [MODEL_MAPPING_NAMES_ENCODER_ONLY, MODEL_TYPES.EncoderOnly], [MODEL_MAPPING_NAMES_ENCODER_DECODER, MODEL_TYPES.EncoderDecoder], [MODEL_MAPPING_NAMES_DECODER_ONLY, MODEL_TYPES.DecoderOnly], [MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_WITH_LM_HEAD_MAPPING_NAMES, MODEL_TYPES.DecoderOnly], [MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Vision2Seq], [MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_IMAGE_MATTING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES, MODEL_TYPES.MaskGeneration], [MODEL_FOR_CTC_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly] ]; for (const [b, n] of MODEL_CLASS_TYPE_MAPPING) for (const [a, u] of b.values()) MODEL_TYPE_MAPPING.set(a, n), MODEL_CLASS_TO_NAME_MAPPING.set(u, a), MODEL_NAME_TO_CLASS_MAPPING.set(a, u); const CUSTOM_MAPPING = [ ["CLIPTextModelWithProjection", CLIPTextModelWithProjection, MODEL_TYPES.EncoderOnly], ["CLIPVisionModelWithProjection", CLIPVisionModelWithProjection, MODEL_TYPES.EncoderOnly], ["SiglipTextModel", SiglipTextModel, MODEL_TYPES.EncoderOnly], ["SiglipVisionModel", SiglipVisionModel, MODEL_TYPES.EncoderOnly], ["ClapTextModelWithProjection", ClapTextModelWithProjection, MODEL_TYPES.EncoderOnly], ["ClapAudioModelWithProjection", ClapAudioModelWithProjection, MODEL_TYPES.EncoderOnly] ]; for (const [b, n, a] of CUSTOM_MAPPING) MODEL_TYPE_MAPPING.set(b, a), MODEL_CLASS_TO_NAME_MAPPING.set(n, b), MODEL_NAME_TO_CLASS_MAPPING.set(b, n); class AutoModel extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = MODEL_CLASS_TYPE_MAPPING.map(n => n[0]); static BASE_IF_FAIL = !0 } class Seq2SeqLMOutput extends ModelOutput { constructor({ logits: n, past_key_values: a, encoder_outputs: u, decoder_attentions: c = null, cross_attentions: f = null }) { super(), this.logits = n, this.past_key_values = a, this.encoder_outputs = u, this.decoder_attentions = c, this.cross_attentions = f } } class SequenceClassifierOutput extends ModelOutput { constructor({ logits: n }) { super(), this.logits = n } } class TokenClassifierOutput extends ModelOutput { constructor({ logits: n }) { 
super(), this.logits = n } } class MaskedLMOutput extends ModelOutput { constructor({ logits: n }) { super(), this.logits = n } } class QuestionAnsweringModelOutput extends ModelOutput { constructor({ start_logits: n, end_logits: a }) { super(), this.start_logits = n, this.end_logits = a } } class CausalLMOutput extends ModelOutput { constructor({ logits: n }) { super(), this.logits = n } } class ImageMattingOutput extends ModelOutput { constructor({ alphas: n }) { super(), this.alphas = n } } class VitsModelOutput extends ModelOutput { constructor({ waveform: n, spectrogram: a }) { super(), this.waveform = n, this.spectrogram = a } } const BROWSER_ENV = typeof self < "u", WEBWORKER_ENV = BROWSER_ENV && self.constructor.name === "DedicatedWorkerGlobalScope"; let createCanvasFunction, ImageDataClass, loadImageFunction; if (BROWSER_ENV) createCanvasFunction = (b, n) => { if (!self.OffscreenCanvas) throw new Error("OffscreenCanvas not supported by this browser."); return new self.OffscreenCanvas(b, n) }, loadImageFunction = self.createImageBitmap, ImageDataClass = self.ImageData; else if (sharp) loadImageFunction = async b => { const a = (await b.metadata()).channels; let { data: u, info: c } = await b.raw().toBuffer({ resolveWithObject: !0 }); const f = new RawImage(new Uint8ClampedArray(u), c.width, c.height, c.channels); return a !== void 0 && a !== c.channels && f.convert(a), f }; else throw new Error("Unable to load image processing library."); const RESAMPLING_MAPPING = { 0: "nearest", 1: "lanczos", 2: "bilinear", 3: "bicubic", 4: "box", 5: "hamming" }, CONTENT_TYPE_MAP = new Map([ ["png", "image/png"], ["jpg", "image/jpeg"], ["jpeg", "image/jpeg"], ["gif", "image/gif"] ]); class RawImage { constructor(n, a, u, c) { this.data = n, this.width = a, this.height = u, this.channels = c } get size() { return [this.width, this.height] } static async read(n) { if (n instanceof RawImage) return n; if (typeof n == "string" || n instanceof URL) return await this.fromURL(n); throw new Error(`Unsupported input type: ${typeof n}`) } static async fromURL(n) { let a = await getFile(n); if (a.status !== 200) throw new Error(`Unable to read image from "${n}" (${a.status} ${a.statusText})`); let u = await a.blob(); return this.fromBlob(u) } static async fromBlob(n) { if (BROWSER_ENV) { let a = await loadImageFunction(n); const u = createCanvasFunction(a.width, a.height).getContext("2d"); return u.drawImage(a, 0, 0), new this(u.getImageData(0, 0, a.width, a.height).data, a.width, a.height, 4) } else { let a = sharp(await n.arrayBuffer()); return await loadImageFunction(a) } } static fromTensor(n, a = "CHW") { if (n.dims.length !== 3) throw new Error(`Tensor should have 3 dimensions, but has ${n.dims.length} dimensions.`); if (a === "CHW") n = n.transpose(1, 2, 0); else if (a !== "HWC") throw new Error(`Unsupported channel format: ${a}`); if (!(n.data instanceof Uint8ClampedArray || n.data instanceof Uint8Array)) throw new Error(`Unsupported tensor type: ${n.type}`); switch (n.dims[2]) { case 1: case 2: case 3: case 4: return new RawImage(n.data, n.dims[1], n.dims[0], n.dims[2]); default: throw new Error(`Unsupported number of channels: ${n.dims[2]}`) } } grayscale() { if (this.channels === 1) return this; let n = new Uint8ClampedArray(this.width * this.height * 1); switch (this.channels) { case 3: case 4: for (let a = 0, u = 0; a < this.data.length; a += this.channels) { const c = this.data[a], f = this.data[a + 1], s = this.data[a + 2]; n[u++] = Math.round(.2989 * c + .587 * f + .114 * s) } break; 
default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`) } return this._update(n, this.width, this.height, 1) } rgb() { if (this.channels === 3) return this; let n = new Uint8ClampedArray(this.width * this.height * 3); switch (this.channels) { case 1: for (let a = 0, u = 0; a < this.data.length; ++a) n[u++] = this.data[a], n[u++] = this.data[a], n[u++] = this.data[a]; break; case 4: for (let a = 0, u = 0; a < this.data.length; a += 4) n[u++] = this.data[a], n[u++] = this.data[a + 1], n[u++] = this.data[a + 2]; break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`) } return this._update(n, this.width, this.height, 3) } rgba() { if (this.channels === 4) return this; let n = new Uint8ClampedArray(this.width * this.height * 4); switch (this.channels) { case 1: for (let a = 0, u = 0; a < this.data.length; ++a) n[u++] = this.data[a], n[u++] = this.data[a], n[u++] = this.data[a], n[u++] = 255; break; case 3: for (let a = 0, u = 0; a < this.data.length; a += 3) n[u++] = this.data[a], n[u++] = this.data[a + 1], n[u++] = this.data[a + 2], n[u++] = 255; break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`) } return this._update(n, this.width, this.height, 4) } async resize(n, a, { resample: u = 2 } = {}) { let c = RESAMPLING_MAPPING[u] ?? u; if (BROWSER_ENV) { let f = this.channels, s = this.toCanvas(); const h = createCanvasFunction(n, a).getContext("2d"); return h.drawImage(s, 0, 0, n, a), new RawImage(h.getImageData(0, 0, n, a).data, n, a, 4).convert(f) } else { let f = this.toSharp(); switch (c) { case "box": case "hamming": (c === "box" || c === "hamming") && (console.warn(`Resampling method ${c} is not yet supported. 
Using bilinear instead.`), c = "bilinear"); case "nearest": case "bilinear": case "bicubic": f = f.affine([n / this.width, 0, 0, a / this.height], { interpolator: c }); break; case "lanczos": f = f.resize({ width: n, height: a, fit: "fill", kernel: "lanczos3" }); break; default: throw new Error(`Resampling method ${c} is not supported.`) } return await loadImageFunction(f) } } async pad([n, a, u, c]) { if (n = Math.max(n, 0), a = Math.max(a, 0), u = Math.max(u, 0), c = Math.max(c, 0), n === 0 && a === 0 && u === 0 && c === 0) return this; if (BROWSER_ENV) { let f = this.channels, s = this.toCanvas(), h = this.width + n + a, p = this.height + u + c; const l = createCanvasFunction(h, p).getContext("2d"); return l.drawImage(s, 0, 0, this.width, this.height, n, u, h, p), new RawImage(l.getImageData(0, 0, h, p).data, h, p, 4).convert(f) } else { let f = this.toSharp().extend({ left: n, right: a, top: u, bottom: c }); return await loadImageFunction(f) } } async crop([n, a, u, c]) { if (n = Math.max(n, 0), a = Math.max(a, 0), u = Math.min(u, this.width - 1), c = Math.min(c, this.height - 1), n === 0 && a === 0 && u === this.width - 1 && c === this.height - 1) return this; const f = u - n + 1, s = c - a + 1; if (BROWSER_ENV) { const h = this.channels, p = this.toCanvas(), l = createCanvasFunction(f, s).getContext("2d"); return l.drawImage(p, n, a, f, s, 0, 0, f, s), new RawImage(l.getImageData(0, 0, f, s).data, f, s, 4).convert(h) } else { const h = this.toSharp().extract({ left: n, top: a, width: f, height: s }); return await loadImageFunction(h) } } async center_crop(n, a) { if (this.width === n && this.height === a) return this; let u = (this.width - n) / 2, c = (this.height - a) / 2; if (BROWSER_ENV) { let f = this.channels, s = this.toCanvas(); const h = createCanvasFunction(n, a).getContext("2d"); let p = 0, l = 0, o = 0, t = 0; return u >= 0 ? p = u : o = -u, c >= 0 ? l = c : t = -c, h.drawImage(s, p, l, n, a, o, t, n, a), new RawImage(h.getImageData(0, 0, n, a).data, n, a, 4).convert(f) } else { let f = this.toSharp(); if (u >= 0 && c >= 0) f = f.extract({ left: Math.floor(u), top: Math.floor(c), width: n, height: a }); else if (u <= 0 && c <= 0) { let s = Math.floor(-c), h = Math.floor(-u); f = f.extend({ top: s, left: h, right: n - this.width - h, bottom: a - this.height - s }) } else { let s = [0, 0], h = 0; c < 0 ? (s[0] = Math.floor(-c), s[1] = a - this.height - s[0]) : h = Math.floor(c); let p = [0, 0], l = 0; u < 0 ? 
(p[0] = Math.floor(-u), p[1] = n - this.width - p[0]) : l = Math.floor(u), f = f.extend({ top: s[0], bottom: s[1], left: p[0], right: p[1] }).extract({ left: l, top: h, width: n, height: a }) } return await loadImageFunction(f) } } async toBlob(n = "image/png", a = 1) { if (!BROWSER_ENV) throw new Error("toBlob() is only supported in browser environments."); return await this.toCanvas().convertToBlob({ type: n, quality: a }) } toCanvas() { if (!BROWSER_ENV) throw new Error("toCanvas() is only supported in browser environments."); let n = this.clone().rgba(), a = createCanvasFunction(n.width, n.height), u = new ImageDataClass(n.data, n.width, n.height); return a.getContext("2d").putImageData(u, 0, 0), a } _update(n, a, u, c = null) { return this.data = n, this.width = a, this.height = u, c !== null && (this.channels = c), this } clone() { return new RawImage(this.data.slice(), this.width, this.height, this.channels) } convert(n) { if (this.channels === n) return this; switch (n) { case 1: this.grayscale(); break; case 3: this.rgb(); break; case 4: this.rgba(); break; default: throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`) } return this } async save(n) { if (BROWSER_ENV) { if (WEBWORKER_ENV) throw new Error("Unable to save an image from a Web Worker."); const a = n.split(".").pop().toLowerCase(), u = CONTENT_TYPE_MAP.get(a) ?? "image/png", c = await this.toBlob(u), f = URL.createObjectURL(c), s = document.createElement("a"); s.href = f, s.download = n, s.click(), s.remove() } else { if (env$1.useFS) return await this.toSharp().toFile(n); throw new Error("Unable to save the image because filesystem is disabled in this environment.") } } toSharp() { if (BROWSER_ENV) throw new Error("toSharp() is only supported in server-side environments."); return sharp(this.data, { raw: { width: this.width, height: this.height, channels: this.channels } }) } } function hanning(b) { if (b < 1) return new Float64Array; if (b === 1) return new Float64Array([1]); const n = b - 1, a = Math.PI / n, u = new Float64Array(b); for (let c = 0; c < b; ++c) { const f = 2 * c - n; u[c] = .5 + .5 * Math.cos(a * f) } return u } const HERTZ_TO_MEL_MAPPING = { htk: b => 2595 * Math.log10(1 + b / 700), kaldi: b => 1127 * Math.log(1 + b / 700), slaney: (b, n = 1e3, a = 15, u = 27 / Math.log(6.4)) => b >= n ? a + Math.log(b / n) * u : 3 * b / 200 }; function hertz_to_mel(b, n = "htk") { const a = HERTZ_TO_MEL_MAPPING[n]; if (!a) throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".'); return typeof b == "number" ? a(b) : b.map(u => a(u)) } const MEL_TO_HERTZ_MAPPING = { htk: b => 700 * (10 ** (b / 2595) - 1), kaldi: b => 700 * (Math.exp(b / 1127) - 1), slaney: (b, n = 1e3, a = 15, u = Math.log(6.4) / 27) => b >= a ? n * Math.exp(u * (b - a)) : 200 * b / 3 }; function mel_to_hertz(b, n = "htk") { const a = MEL_TO_HERTZ_MAPPING[n]; if (!a) throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".'); return typeof b == "number" ? 
a(b) : b.map(u => a(u)) } function _create_triangular_filter_bank(b, n) { const a = Float64Array.from({ length: n.length - 1 }, (s, h) => n[h + 1] - n[h]), u = Array.from({ length: b.length }, () => new Array(n.length)); for (let s = 0; s < b.length; ++s) { const h = u[s]; for (let p = 0; p < n.length; ++p) h[p] = n[p] - b[s] } const c = n.length - 2, f = Array.from({ length: c }, () => new Array(b.length)); for (let s = 0; s < b.length; ++s) { const h = u[s]; for (let p = 0; p < c; ++p) { const l = -h[p] / a[p], o = h[p + 2] / a[p + 1]; f[p][s] = Math.max(0, Math.min(l, o)) } } return f } function linspace(b, n, a) { const u = (n - b) / (a - 1); return Float64Array.from({ length: a }, (c, f) => b + u * f) } function mel_filter_bank(b, n, a, u, c, f = null, s = "htk", h = !1) { if (f !== null && f !== "slaney") throw new Error('norm must be one of null or "slaney"'); const p = hertz_to_mel(a, s), l = hertz_to_mel(u, s), o = linspace(p, l, n + 2); let t = mel_to_hertz(o, s), e; if (h) { const i = c / (b * 2); e = hertz_to_mel(Float64Array.from({ length: b }, (d, g) => g * i), s), t = o } else e = linspace(0, Math.floor(c / 2), b); const r = _create_triangular_filter_bank(e, t); if (f !== null && f === "slaney") for (let i = 0; i < n; ++i) { const d = r[i], g = 2 / (t[i + 2] - t[i]); for (let m = 0; m < b; ++m) d[m] *= g } return r } function padReflect(b, n, a) { const u = new b.constructor(b.length + n + a), c = b.length - 1; for (let f = 0; f < b.length; ++f) u[n + f] = b[f]; for (let f = 1; f <= n; ++f) u[n - f] = b[calculateReflectOffset(f, c)]; for (let f = 1; f <= a; ++f) u[c + n + f] = b[calculateReflectOffset(c - f, c)]; return u } function _db_conversion_helper(b, n, a, u, c) { if (a <= 0) throw new Error("reference must be greater than zero"); if (u <= 0) throw new Error("min_value must be greater than zero"); a = Math.max(u, a); const f = Math.log10(a); for (let s = 0; s < b.length; ++s) b[s] = n * Math.log10(Math.max(u, b[s]) - f); if (c !== null) { if (c <= 0) throw new Error("db_range must be greater than zero"); const s = max(b)[0] - c; for (let h = 0; h < b.length; ++h) b[h] = Math.max(b[h], s) } return b } function amplitude_to_db(b, n = 1, a = 1e-5, u = null) { return _db_conversion_helper(b, 20, n, a, u) } function power_to_db(b, n = 1, a = 1e-10, u = null) { return _db_conversion_helper(b, 10, n, a, u) } function spectrogram(b, n, a, u, { fft_length: c = null, power: f = 1, center: s = !0, pad_mode: h = "reflect", onesided: p = !0, preemphasis: l = null, mel_filters: o = null, mel_floor: t = 1e-10, log_mel: e = null, reference: r = 1, min_value: i = 1e-10, db_range: d = null, remove_dc_offset: g = null, max_num_frames: m = null, do_pad: _ = !0, transpose: y = !1 } = {}) { const T = n.length; if (c === null && (c = a), a > c) throw Error(`frame_length (${a}) may not be larger than fft_length (${c})`); if (T !== a) throw new Error(`Length of the window (${T}) must equal frame_length (${a})`); if (u <= 0) throw new Error("hop_length must be greater than zero"); if (s) { if (h !== "reflect") throw new Error(`pad_mode="${h}" not implemented yet.`); const C = Math.floor((c - 1) / 2) + 1; b = padReflect(b, C, C) } const w = Math.floor(1 + Math.floor((b.length - a) / u)), S = p ? Math.floor(c / 2) + 1 : c; let O = w, E = w; m !== null && (m > w ? 
_ && (E = m) : E = O = m); const v = new FFT(c), P = new Float64Array(c), L = new Float64Array(v.outputBufferSize), V = new Array(O); for (let C = 0; C < O; ++C) { const $ = C * u; for (let z = 0; z < a; ++z) P[z] = b[$ + z]; if (g) { let z = 0; for (let J = 0; J < a; ++J) z += P[J]; const Z = z / a; for (let J = 0; J < a; ++J) P[J] -= Z } if (l !== null) { for (let z = a - 1; z >= 1; --z) P[z] -= l * P[z - 1]; P[0] *= 1 - l } for (let z = 0; z < n.length; ++z) P[z] *= n[z]; v.realTransform(L, P); const X = new Array(S); for (let z = 0; z < X.length; ++z) { const Z = z << 1; X[z] = L[Z] ** 2 + L[Z + 1] ** 2 } V[C] = X } if (f !== null && f !== 2) { const C = 2 / f; for (let $ = 0; $ < V.length; ++$) { const X = V[$]; for (let z = 0; z < X.length; ++z) X[z] **= C } } const R = o.length, k = new Float32Array(R * E), Y = y ? [E, R] : [R, E]; for (let C = 0; C < R; ++C) { const $ = o[C]; for (let X = 0; X < O; ++X) { const z = V[X]; let Z = 0; for (let J = 0; J < S; ++J) Z += $[J] * z[J]; k[y ? X * R + C : C * O + X] = Math.max(t, Z) } } if (f !== null && e !== null) { const C = Math.min(k.length, O * R); switch (e) { case "log": for (let $ = 0; $ < C; ++$) k[$] = Math.log(k[$]); break; case "log10": for (let $ = 0; $ < C; ++$) k[$] = Math.log10(k[$]); break; case "dB": if (f === 1) amplitude_to_db(k, r, i, d); else if (f === 2) power_to_db(k, r, i, d); else throw new Error(`Cannot use log_mel option '${e}' with power ${f}`); break; default: throw new Error(`log_mel must be one of null, 'log', 'log10' or 'dB'. Got '${e}'`) } } return { data: k, dims: Y } } function window_function(b, n, { periodic: a = !0, frame_length: u = null, center: c = !0 } = {}) { const f = a ? b + 1 : b; let s; switch (n) { case "boxcar": s = new Float64Array(f).fill(1); break; case "hann": case "hann_window": s = hanning(f); break; case "povey": s = hanning(f).map(h => Math.pow(h, .85)); break; default: throw new Error(`Unknown window type ${n}.`) } if (a && (s = s.subarray(0, b)), u === null) return s; if (b > u) throw new Error(`Length of the window (${b}) may not be larger than frame_length (${u})`); return s } function center_to_corners_format([b, n, a, u]) { return [b - a / 2, n - u / 2, b + a / 2, n + u / 2] } function post_process_object_detection(b, n = .5, a = null, u = !1) { const c = b.logits, f = b.pred_boxes, [s, h, p] = c.dims; if (a !== null && a.length !== s) throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits"); let l = []; for (let o = 0; o < s; ++o) { let t = a !== null ? 
a[o] : null, e = { boxes: [], classes: [], scores: [] }, r = c[o], i = f[o]; for (let d = 0; d < h; ++d) { let g = r[d], m = [], _; if (u) { _ = g.sigmoid().data; for (let y = 0; y < _.length; ++y) _[y] > n && m.push(y) } else { let y = max(g.data)[1]; if (y === p - 1) continue; m.push(y), _ = softmax(g.data) } for (const y of m) { let T = i[d].data; T = center_to_corners_format(T), t !== null && (T = T.map((w, S) => w * t[(S + 1) % 2])), e.boxes.push(T), e.classes.push(y), e.scores.push(_[y]) } } l.push(e) } return l } function validate_audio_inputs(b, n) { if (!(b instanceof Float32Array || b instanceof Float64Array)) throw new Error(`${n} expects input to be a Float32Array or a Float64Array, but got ${b?.constructor?.name??typeof b} instead.If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.`) } function constraint_to_multiple_of(b, n, a = 0, u = null) { let c = Math.round(b / n) * n; return u !== null && c > u && (c = Math.floor(b / n) * n), c < a && (c = Math.ceil(b / n) * n), c } function enforce_size_divisibility([b, n], a) { return [Math.floor(b / a) * a, Math.floor(n / a) * a] } class FeatureExtractor extends Callable { constructor(n) { super(), this.config = n } } class ImageFeatureExtractor extends FeatureExtractor { constructor(n) { super(n), this.image_mean = this.config.image_mean ?? this.config.mean, this.image_std = this.config.image_std ?? this.config.std, this.resample = this.config.resample ?? 2, this.do_rescale = this.config.do_rescale ?? !0, this.rescale_factor = this.config.rescale_factor ?? 1 / 255, this.do_normalize = this.config.do_normalize, this.do_resize = this.config.do_resize, this.do_thumbnail = this.config.do_thumbnail, this.size = this.config.size, this.size_divisibility = this.config.size_divisibility ?? this.config.size_divisor, this.do_center_crop = this.config.do_center_crop, this.crop_size = this.config.crop_size, this.do_convert_rgb = this.config.do_convert_rgb ?? !0, this.do_crop_margin = this.config.do_crop_margin, this.pad_size = this.config.pad_size, this.do_pad = this.config.do_pad, this.do_pad && !this.pad_size && this.size && this.size.width !== void 0 && this.size.height !== void 0 && (this.pad_size = this.size) } async thumbnail(n, a, u = 2) { const c = n.height, f = n.width, s = a.height, h = a.width; let p = Math.min(c, s), l = Math.min(f, h); return p === c && l === f ? n : (c > f ? l = Math.floor(f * p / c) : f > c && (p = Math.floor(c * l / f)), await n.resize(l, p, { resample: u })) } async crop_margin(n, a = 200) { const u = n.clone().grayscale(), c = min(u.data)[0], s = max(u.data)[0] - c; if (s === 0) return n; const h = a / 255; let p = u.width, l = u.height, o = 0, t = 0; for (let e = 0; e < u.height; ++e) { const r = e * u.width; for (let i = 0; i < u.width; ++i)(u.data[r + i] - c) / s < h && (p = Math.min(p, i), l = Math.min(l, e), o = Math.max(o, i), t = Math.max(t, e)) } return n = await n.crop([p, l, o, t]), n } pad_image(n, a, u, { mode: c = "constant", center: f = !1, constant_values: s = 0 } = {}) { const [h, p, l] = a; let o, t; if (typeof u == "number" ? (o = u, t = u) : (o = u.width, t = u.height), o !== h || t !== p) { const e = new Float32Array(o * t * l); if (Array.isArray(s)) for (let d = 0; d < e.length; ++d) e[d] = s[d % l]; else s !== 0 && e.fill(s); const [r, i] = f ? 
[Math.floor((o - h) / 2), Math.floor((t - p) / 2)] : [0, 0]; for (let d = 0; d < p; ++d) { const g = (d + i) * o, m = d * h; for (let _ = 0; _ < h; ++_) { const y = (g + _ + r) * l, T = (m + _) * l; for (let w = 0; w < l; ++w) e[y + w] = n[T + w] } } if (c === "symmetric") { if (f) throw new Error("`center` padding is not supported when `mode` is set to `symmetric`."); const d = p - 1, g = h - 1; for (let m = 0; m < t; ++m) { const _ = m * o, y = calculateReflectOffset(m, d) * h; for (let T = 0; T < o; ++T) { if (m < p && T < h) continue; const w = (_ + T) * l, S = (y + calculateReflectOffset(T, g)) * l; for (let O = 0; O < l; ++O) e[w + O] = n[S + O] } } } n = e, a = [t, o, l] } return [n, a] } rescale(n) { for (let a = 0; a < n.length; ++a) n[a] = this.rescale_factor * n[a] } get_resize_output_image_size(n, a) { const [u, c] = n.size; let f, s; if (this.do_thumbnail) { const { height: h, width: p } = a; f = Math.min(h, p) } else Number.isInteger(a) ? (f = a, s = this.config.max_size ?? f) : a !== void 0 && (f = a.shortest_edge, s = a.longest_edge); if (f !== void 0 || s !== void 0) { const h = f === void 0 ? 1 : Math.max(f / u, f / c), p = u * h, l = c * h, o = s === void 0 ? 1 : Math.min(s / p, s / l); let t = Math.floor(Number((p * o).toFixed(2))), e = Math.floor(Number((l * o).toFixed(2))); return this.size_divisibility !== void 0 && ([t, e] = enforce_size_divisibility([t, e], this.size_divisibility)), [t, e] } else if (a !== void 0 && a.width !== void 0 && a.height !== void 0) { let h = a.width, p = a.height; if (this.config.keep_aspect_ratio && this.config.ensure_multiple_of) { let l = a.height / c, o = a.width / u; Math.abs(1 - o) < Math.abs(1 - l) ? l = o : o = l, p = constraint_to_multiple_of(l * c, this.config.ensure_multiple_of), h = constraint_to_multiple_of(o * u, this.config.ensure_multiple_of) } return [h, p] } else { if (this.size_divisibility !== void 0) return enforce_size_divisibility([u, c], this.size_divisibility); throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(a)}`) } } async resize(n) { const [a, u] = this.get_resize_output_image_size(n, this.size); return await n.resize(a, u, { resample: this.resample }) } async preprocess(n, { do_normalize: a = null, do_pad: u = null, do_convert_rgb: c = null, do_convert_grayscale: f = null } = {}) { this.do_crop_margin && (n = await this.crop_margin(n)); const [s, h] = n.size; if (c ?? this.do_convert_rgb ? n = n.rgb() : f && (n = n.grayscale()), this.do_resize && (n = await this.resize(n)), this.do_thumbnail && (n = await this.thumbnail(n, this.size, this.resample)), this.do_center_crop) { let r, i; Number.isInteger(this.crop_size) ? (r = this.crop_size, i = this.crop_size) : (r = this.crop_size.width, i = this.crop_size.height), n = await n.center_crop(r, i) } const p = [n.height, n.width]; let l = Float32Array.from(n.data), o = [n.height, n.width, n.channels]; if (this.do_rescale && this.rescale(l), a ?? 
this.do_normalize) { let r = this.image_mean; Array.isArray(this.image_mean) || (r = new Array(n.channels).fill(r)); let i = this.image_std; if (Array.isArray(this.image_std) || (i = new Array(n.channels).fill(i)), r.length !== n.channels || i.length !== n.channels) throw new Error(`When set to arrays, the length of \`image_mean\` (${r.length}) and \`image_std\` (${i.length}) must match the number of channels in the image (${n.channels}).`); for (let d = 0; d < l.length; d += n.channels) for (let g = 0; g < n.channels; ++g) l[d + g] = (l[d + g] - r[g]) / i[g] }(u ?? (this.do_pad && this.pad_size)) && ([l, o] = this.pad_image(l, [n.width, n.height, n.channels], this.pad_size)); const t = new Tensor("float32", l, o), e = transpose(t, [2, 0, 1]); return { original_size: [h, s], reshaped_input_size: p, pixel_values: e } } async _call(n, ...a) { Array.isArray(n) || (n = [n]); const u = await Promise.all(n.map(f => this.preprocess(f))); return { pixel_values: stack(u.map(f => f.pixel_values), 0), original_sizes: u.map(f => f.original_size), reshaped_input_sizes: u.map(f => f.reshaped_input_size) } } } class SegformerFeatureExtractor extends ImageFeatureExtractor { post_process_semantic_segmentation(n, a = null) { const u = n.logits, c = u.dims[0]; if (a !== null && a.length !== c) throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits"); const f = []; for (let s = 0; s < c; ++s) { const h = a !== null ? a[s] : null; let p = u[s]; h !== null && (p = interpolate(p, h, "bilinear", !1)); const [l, o] = h ?? p.dims.slice(-2), t = new Tensor("int32", new Int32Array(l * o), [l, o]), e = p[0].data; for (let g = 1; g < p.dims[0]; ++g) { const m = p[g].data; for (let _ = 0; _ < m.length; ++_) m[_] > e[_] && (e[_] = m[_], t.data[_] = g) } const r = new Array(p.dims[0]), i = t.data; for (let g = 0; g < i.length; ++g) { const m = i[g]; r[m] = m } const d = r.filter(g => g !== void 0); f.push({ segmentation: t, labels: d }) } return f } } class DPTImageProcessor extends ImageFeatureExtractor {} class BitImageProcessor extends ImageFeatureExtractor {} class DPTFeatureExtractor extends ImageFeatureExtractor {} class GLPNFeatureExtractor extends ImageFeatureExtractor {} class CLIPFeatureExtractor extends ImageFeatureExtractor {} class ChineseCLIPFeatureExtractor extends ImageFeatureExtractor {} class SiglipImageProcessor extends ImageFeatureExtractor {} class ConvNextFeatureExtractor extends ImageFeatureExtractor { constructor(n) { super(n), this.crop_pct = this.config.crop_pct ??
224 / 256 } async resize(n) { const a = this.size?.shortest_edge; if (a === void 0) throw new Error("Size dictionary must contain 'shortest_edge' key."); if (a < 384) { const u = Math.floor(a / this.crop_pct), [c, f] = this.get_resize_output_image_size(n, { shortest_edge: u }); n = await n.resize(c, f, { resample: this.resample }), n = await n.center_crop(a, a) } else n = await n.resize(a, a, { resample: this.resample }); return n } } class ConvNextImageProcessor extends ConvNextFeatureExtractor {} class ViTFeatureExtractor extends ImageFeatureExtractor {} class ViTImageProcessor extends ImageFeatureExtractor {} class MobileViTFeatureExtractor extends ImageFeatureExtractor {} class OwlViTFeatureExtractor extends ImageFeatureExtractor { post_process_object_detection(...n) { return post_process_object_detection(...n) } } class DeiTFeatureExtractor extends ImageFeatureExtractor {} class BeitFeatureExtractor extends ImageFeatureExtractor {} class DonutFeatureExtractor extends ImageFeatureExtractor { pad_image(n, a, u, c = {}) { const [f, s, h] = a; let p = this.image_mean; Array.isArray(this.image_mean) || (p = new Array(h).fill(p)); let l = this.image_std; Array.isArray(l) || (l = new Array(h).fill(p)); const o = p.map((t, e) => -t / this.image_std[e]); return super.pad_image(n, a, u, { center: !0, constant_values: o, ...c }) } } class NougatImageProcessor extends DonutFeatureExtractor {} class DetrFeatureExtractor extends ImageFeatureExtractor { async _call(n) { const a = await super._call(n), u = [a.pixel_values.dims[0], 64, 64], c = new Tensor("int64", new BigInt64Array(u.reduce((f, s) => f * s)).fill(1n), u); return { ...a, pixel_mask: c } } post_process_object_detection(...n) { return post_process_object_detection(...n) } remove_low_and_no_objects(n, a, u, c) { let f = [], s = [], h = []; for (let p = 0; p < n.dims[0]; ++p) { let l = n[p], o = a[p], t = max(l.data)[1]; if (t === c) continue; let r = softmax(l.data)[t]; r > u && (f.push(o), s.push(r), h.push(t)) } return [f, s, h] } check_segment_validity(n, a, u, c = .5, f = .8) { let s = [], h = 0, p = 0; for (let o = 0; o < n.length; ++o) n[o] === u && (s.push(o), ++h), a[u].data[o] >= c && ++p; let l = h > 0 && p > 0; return l && (l = h / p > f), [l, s] } compute_segments(n, a, u, c, f, s = null, h = null) { let [p, l] = h ?? n[0].dims, o = new Tensor("int32", new Int32Array(p * l), [p, l]), t = []; if (h !== null) for (let d = 0; d < n.length; ++d) n[d] = interpolate(n[d], h, "bilinear", !1); let e = new Int32Array(n[0].data.length), r = new Float32Array(n[0].data.length); for (let d = 0; d < n.length; ++d) { let g = a[d]; for (let m = 0; m < n[d].data.length; ++m) n[d].data[m] *= g, n[d].data[m] > r[m] && (e[m] = d, r[m] = n[d].data[m]) } let i = 0; for (let d = 0; d < u.length; ++d) { let g = u[d], [m, _] = this.check_segment_validity(e, n, d, c, f); if (m) { ++i; for (let y of _) o.data[y] = i; t.push({ id: i, label_id: g, score: a[d] }) } } return [o, t] } post_process_panoptic_segmentation(n, a = .5, u = .5, c = .8, f = null, s = null) { f === null && (console.warn("`label_ids_to_fuse` unset. No instance will be fused."), f = new Set); const h = n.logits, l = n.pred_masks.sigmoid(); let [o, t, e] = h.dims; if (e -= 1, s !== null && s.length !== o) throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits"); let r = []; for (let i = 0; i < o; ++i) { let d = s !== null ? 
s[i] : null, g = h[i], m = l[i], [_, y, T] = this.remove_low_and_no_objects(g, m, a, e); if (T.length === 0) { let [O, E] = d ?? m.dims.slice(-2), v = new Tensor("int32", new Int32Array(O * E).fill(-1), [O, E]); r.push({ segmentation: v, segments_info: [] }); continue } let [w, S] = this.compute_segments(_, y, T, u, c, f, d); r.push({ segmentation: w, segments_info: S }) } return r } post_process_instance_segmentation() { throw Error("Not implemented yet") } } class YolosFeatureExtractor extends ImageFeatureExtractor { post_process_object_detection(...n) { return post_process_object_detection(...n) } } class SamImageProcessor extends ImageFeatureExtractor { reshape_input_points(n, a, u) { n = structuredClone(n); let c = calculateDimensions(n); if (c.length === 3) c = [1, ...c], n = [n]; else if (c.length !== 4) throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`."); for (let f = 0; f < n.length; ++f) { let s = a[f], h = u[f], p = [h[0] / s[0], h[1] / s[1]]; for (let l = 0; l < n[f].length; ++l) for (let o = 0; o < n[f][l].length; ++o) for (let t = 0; t < n[f][l][o].length; ++t) n[f][l][o][t] *= p[t] } return new Tensor("float32", Float32Array.from(n.flat(1 / 0)), c) } add_input_labels(n, a) { let u = calculateDimensions(n); if (u.length === 2) u = [1, ...u], n = [n]; else if (u.length !== 3) throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`."); if (u.some((c, f) => c !== a.dims[f])) throw Error(`The first ${u.length} dimensions of 'input_points' and 'input_labels' must be the same.`); return new Tensor("int64", n.flat(1 / 0).map(BigInt), u) } async _call(n, a = null, u = null) { const c = await super._call(n); if (a && (c.input_points = this.reshape_input_points(a, c.original_sizes, c.reshaped_input_sizes)), u) { if (!c.input_points) throw Error("`input_points` must be provided if `input_labels` are provided."); c.input_labels = this.add_input_labels(u, c.input_points) } return c } post_process_masks(n, a, u, { mask_threshold: c = 0, binarize: f = !0, pad_size: s = null } = {}) { const h = []; s = s ?? 
this.pad_size; const p = [s.height, s.width]; for (let l = 0; l < a.length; ++l) { const o = a[l], t = u[l], e = n[l], r = []; for (let i = 0; i < e.dims[0]; ++i) { const d = e[i]; let g = interpolate(d, p, "bilinear", !1); if (g = g.slice(null, [0, t[0]], [0, t[1]]), g = interpolate(g, o, "bilinear", !1), f) { const m = new Uint8Array(g.data.length); for (let _ = 0; _ < g.data.length; ++_) g.data[_] > c && (m[_] = 1); g = new Tensor("bool", m, g.dims) } r.push(g) } h.push(stack(r)) } return h } } class Swin2SRImageProcessor extends ImageFeatureExtractor { pad_image(n, a, u, c = {}) { const [f, s, h] = a; return super.pad_image(n, a, { width: f + (u - f % u) % u, height: s + (u - s % u) % u }, { mode: "symmetric", center: !1, constant_values: -1, ...c }) } } class VitMatteImageProcessor extends ImageFeatureExtractor { async _call(n, a) { Array.isArray(n) || (n = [n]), Array.isArray(a) || (a = [a]); const u = await Promise.all(n.map(s => this.preprocess(s))), c = await Promise.all(a.map(s => this.preprocess(s, { do_normalize: !1, do_convert_rgb: !1, do_convert_grayscale: !0 }))); return { pixel_values: stack(u.map((s, h) => cat([s.pixel_values, c[h].pixel_values], 0)), 0), original_sizes: u.map(s => s.original_size), reshaped_input_sizes: u.map(s => s.reshaped_input_size) } } } class WhisperFeatureExtractor extends FeatureExtractor { constructor(n) { super(n), this.config.mel_filters ??= mel_filter_bank(Math.floor(1 + this.config.n_fft / 2), this.config.feature_size, 0, 8e3, this.config.sampling_rate, "slaney", "slaney"), this.window = window_function(this.config.n_fft, "hann") } _extract_fbank_features(n) { const { data: a, dims: u } = spectrogram(n, this.window, this.config.n_fft, this.config.hop_length, { power: 2, mel_filters: this.config.mel_filters, log_mel: "log10", max_num_frames: this.config.nb_max_frames }), c = max(a)[0]; for (let f = 0; f < a.length; ++f) a[f] = (Math.max(a[f], c - 8) + 4) / 4; return { data: a, dims: u } } async _call(n) { validate_audio_inputs(n, "WhisperFeatureExtractor"); let a; n.length > this.config.n_samples ? (console.warn("Attempting to extract features for audio longer than 30 seconds. \
If using a pipeline to extract transcript from a long audio clip, remember to specify `chunk_length_s` and/or `stride_length_s`."), a = n.slice(0, this.config.n_samples)) : (a = new Float32Array(this.config.n_samples), a.set(n)); const { data: u, dims: c } = this._extract_fbank_features(a); return { input_features: new Tensor("float32", u, [1, ...c]) } } } class Wav2Vec2FeatureExtractor extends FeatureExtractor { _zero_mean_unit_var_norm(n) { const u = n.reduce((f, s) => f + s, 0) / n.length, c = n.reduce((f, s) => f + (s - u) ** 2, 0) / n.length; return n.map(f => (f - u) / Math.sqrt(c + 1e-7)) } async _call(n) { validate_audio_inputs(n, "Wav2Vec2FeatureExtractor"), n instanceof Float64Array && (n = new Float32Array(n)); let a = n; this.config.do_normalize && (a = this._zero_mean_unit_var_norm(a)); const u = [1, a.length]; return { input_values: new Tensor("float32", a, u), attention_mask: new Tensor("int64", new BigInt64Array(a.length).fill(1n), u) } } } class SeamlessM4TFeatureExtractor extends FeatureExtractor { constructor(n) { super(n); const a = this.config.sampling_rate, u = mel_filter_bank(256, this.config.num_mel_bins, 20, Math.floor(a / 2), a, null, "kaldi", !0); for (let c = 0; c < u.length; ++c) u[c].push(0); this.mel_filters = u, this.window = window_function(400, "povey", { periodic: !1 }) } _extract_fbank_features(n, a) { return n = n.map(u => u * 32768), spectrogram(n, this.window, 400, 160, { fft_length: 512, power: 2, center: !1, preemphasis: .97, mel_filters: this.mel_filters, log_mel: "log", mel_floor: 1192092955078125e-22, remove_dc_offset: !0, max_num_frames: a, transpose: !0 }) } async _call(n, { padding: a = !0, pad_to_multiple_of: u = 2, do_normalize_per_mel_bins: c = !0, return_attention_mask: f = !0 } = {}) { validate_audio_inputs(n, "SeamlessM4TFeatureExtractor"); let s = this._extract_fbank_features(n, this.config.max_length); if (c) { const [i, d] = s.dims; for (let g = 0; g < d; ++g) { let m = 0; for (let w = 0; w < i; ++w) m += s.data[w * d + g]; const _ = m / i; let y = 0; for (let w = 0; w < i; ++w) y += (s.data[w * d + g] - _) ** 2; y /= i - 1; const T = Math.sqrt(y + 1e-7); for (let w = 0; w < i; ++w) { const S = w * d + g; s.data[S] = (s.data[S] - _) / T } } } let h; if (a) { const [i, d] = s.dims, g = i % u; if (g > 0) { const m = new Float32Array(d * (i + g)); m.set(s.data), m.fill(this.config.padding_value, s.data.length); const _ = i + g; s = { data: m, dims: [_, d] }, f && (h = new Tensor("int64", new BigInt64Array(_), [1, _]), h.data.fill(1n, 0, i)) } } const [p, l] = s.dims, o = this.config.stride; if (p % o !== 0) throw new Error(`The number of frames (${p}) must be a multiple of the stride (${o}).`); const e = new Tensor("float32", s.data, s.dims).view(1, Math.floor(p / o), l * o), r = { input_features: e }; if (f) { const i = e.dims[1], d = new Tensor("int64", new BigInt64Array(i), [1, i]); if (h) for (let g = 1, m = 0; g < p; g += o, ++m) d.data[m] = h.data[g]; else d.data.fill(1n); r.attention_mask = d } return r } } class ASTFeatureExtractor extends FeatureExtractor { constructor(n) { super(n); const a = this.config.sampling_rate, u = mel_filter_bank(256, this.config.num_mel_bins, 20, Math.floor(a / 2), a, null, "kaldi", !0); for (let c = 0; c < u.length; ++c) u[c].push(0); this.mel_filters = u, this.window = window_function(400, "hann", { periodic: !1 }), this.mean = this.config.mean, this.std = this.config.std } _extract_fbank_features(n, a) { return spectrogram(n, this.window, 400, 160, { fft_length: 512, power: 2, center: !1, 
preemphasis: .97, mel_filters: this.mel_filters, log_mel: "log", mel_floor: 1192092955078125e-22, remove_dc_offset: !0, max_num_frames: a, transpose: !0 }) } async _call(n) { validate_audio_inputs(n, "ASTFeatureExtractor"); const a = this._extract_fbank_features(n, this.config.max_length); if (this.config.do_normalize) { const u = this.std * 2; for (let c = 0; c < a.data.length; ++c) a.data[c] = (a.data[c] - this.mean) / u } return { input_values: new Tensor("float32", a.data, [1, ...a.dims]) } } } class ClapFeatureExtractor extends FeatureExtractor { constructor(n) { super(n), this.mel_filters = mel_filter_bank(this.config.nb_frequency_bins, this.config.feature_size, this.config.frequency_min, this.config.frequency_max, this.config.sampling_rate, null, "htk"), this.mel_filters_slaney = mel_filter_bank(this.config.nb_frequency_bins, this.config.feature_size, this.config.frequency_min, this.config.frequency_max, this.config.sampling_rate, "slaney", "slaney"), this.window = window_function(this.config.fft_window_size, "hann") } _get_input_mel(n, a, u, c) { let f, s = !1; const h = n.length - a; if (h > 0) if (u === "rand_trunc") { s = !0; const p = Math.floor(Math.random() * (h + 1)); n = n.subarray(p, p + a), f = this._extract_fbank_features(n, this.mel_filters_slaney, this.config.nb_max_samples), f.dims = [1, ...f.dims] } else throw new Error(`Truncation strategy "${u}" not implemented`); else { if (h < 0) { let p = new Float64Array(a); if (p.set(n), c === "repeat") for (let l = n.length; l < a; l += n.length) p.set(n.subarray(0, Math.min(n.length, a - l)), l); else if (c === "repeatpad") for (let l = n.length; l < -h; l += n.length) p.set(n, l); n = p } if (u === "fusion") throw new Error(`Truncation strategy "${u}" not implemented`); f = this._extract_fbank_features(n, this.mel_filters_slaney, this.config.nb_max_samples), f.dims = [1, ...f.dims] } return { ...f, longer: s } } _extract_fbank_features(n, a, u = null) { return spectrogram(n, this.window, this.config.fft_window_size, this.config.hop_length, { power: 2, mel_filters: a, log_mel: "dB", max_num_frames: u, do_pad: !1, transpose: !0 }) } async _call(n, { max_length: a = null } = {}) { validate_audio_inputs(n, "ClapFeatureExtractor"); const u = this._get_input_mel(n, a ?? 
this.config.nb_max_samples, this.config.truncation, this.config.padding); return { input_features: new Tensor("float32", u.data, [1, ...u.dims]) } } } class SpeechT5FeatureExtractor extends FeatureExtractor {} class Processor extends Callable { constructor(n) { super(), this.feature_extractor = n } async _call(n, ...a) { return await this.feature_extractor(n, ...a) } } class SamProcessor extends Processor { async _call(...n) { return await this.feature_extractor(...n) } post_process_masks(...n) { return this.feature_extractor.post_process_masks(...n) } reshape_input_points(...n) { return this.feature_extractor.reshape_input_points(...n) } } class WhisperProcessor extends Processor { async _call(n) { return await this.feature_extractor(n) } } class Wav2Vec2ProcessorWithLM extends Processor { async _call(n) { return await this.feature_extractor(n) } } class SpeechT5Processor extends Processor { async _call(n) { return await this.feature_extractor(n) } } class OwlViTProcessor extends Processor {} class AutoProcessor { static FEATURE_EXTRACTOR_CLASS_MAPPING = { ImageFeatureExtractor, WhisperFeatureExtractor, ViTFeatureExtractor, MobileViTFeatureExtractor, OwlViTFeatureExtractor, CLIPFeatureExtractor, ChineseCLIPFeatureExtractor, SiglipImageProcessor, ConvNextFeatureExtractor, ConvNextImageProcessor, SegformerFeatureExtractor, BitImageProcessor, DPTImageProcessor, DPTFeatureExtractor, GLPNFeatureExtractor, BeitFeatureExtractor, DeiTFeatureExtractor, DetrFeatureExtractor, YolosFeatureExtractor, DonutFeatureExtractor, NougatImageProcessor, ViTImageProcessor, VitMatteImageProcessor, SamImageProcessor, Swin2SRImageProcessor, Wav2Vec2FeatureExtractor, SeamlessM4TFeatureExtractor, SpeechT5FeatureExtractor, ASTFeatureExtractor, ClapFeatureExtractor }; static PROCESSOR_CLASS_MAPPING = { WhisperProcessor, Wav2Vec2ProcessorWithLM, SamProcessor, SpeechT5Processor, OwlViTProcessor }; static async from_pretrained(n, { progress_callback: a = null, config: u = null, cache_dir: c = null, local_files_only: f = !1, revision: s = "main" } = {}) { let h = u ?? await getModelJSON(n, "preprocessor_config.json", !0, { progress_callback: a, config: u, cache_dir: c, local_files_only: f, revision: s }), p = h.feature_extractor_type ?? h.image_processor_type, l = this.FEATURE_EXTRACTOR_CLASS_MAPPING[p]; if (!l) if (h.size !== void 0) console.warn(`Feature extractor type "${p}" not found, assuming ImageFeatureExtractor due to size parameter in config.`), l = ImageFeatureExtractor; else throw new Error(`Unknown Feature Extractor type: ${p}`); let o = this.PROCESSOR_CLASS_MAPPING[h.processor_class] ?? 
Processor, t = new l(h); return new o(t) } }
env$1.allowLocalModels = !1;
env$1.backends.onnx.wasm.proxy = !0;
const EXAMPLE_URL = "https://images.pexels.com/photos/5965592/pexels-photo-5965592.jpeg?auto=compress&cs=tinysrgb&w=1024",
    status = document.getElementById("status"),
    fileUpload = document.getElementById("upload"),
    imageContainer = document.getElementById("container"),
    example = document.getElementById("example");
status.textContent = "Loading model...";
const model = await AutoModel.from_pretrained("briaai/RMBG-1.4", { config: { model_type: "custom" } }),
    processor = await AutoProcessor.from_pretrained("briaai/RMBG-1.4", { config: { do_normalize: !0, do_pad: !1, do_rescale: !0, do_resize: !0, image_mean: [.5, .5, .5], feature_extractor_type: "ImageFeatureExtractor", image_std: [1, 1, 1], resample: 2, rescale_factor: .00392156862745098, size: { width: 1024, height: 1024 } } });
status.textContent = "Ready";
example.addEventListener("click", b => { b.preventDefault(), predict(EXAMPLE_URL) });
fileUpload.addEventListener("change", function(b) {
    const n = b.target.files[0];
    if (!n) return;
    const a = new FileReader;
    a.onload = u => predict(u.target.result), a.readAsDataURL(n)
});
async function predict(b) {
    const n = await RawImage.fromURL(b);
    imageContainer.innerHTML = "", imageContainer.style.backgroundImage = `url(${b})`;
    const a = n.width / n.height, [u, c] = a > 720 / 480 ? [720, 720 / a] : [480 * a, 480];
    imageContainer.style.width = `${u}px`, imageContainer.style.height = `${c}px`, status.textContent = "Analysing...";
    const { pixel_values: f } = await processor(n), { output: s } = await model({ input: f }),
        h = await RawImage.fromTensor(s[0].mul(255).to("uint8")).resize(n.width, n.height),
        p = document.createElement("canvas");
    p.width = n.width, p.height = n.height;
    const l = p.getContext("2d");
    l.drawImage(n.toCanvas(), 0, 0);
    const o = l.getImageData(0, 0, n.width, n.height);
    for (let t = 0; t < h.data.length; ++t) o.data[4 * t + 3] = h.data[t];
    l.putImageData(o, 0, 0), imageContainer.append(p), imageContainer.style.removeProperty("background-image"), imageContainer.style.background = 'url("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQBAMAAADt3eJSAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAGUExURb+/v////5nD/3QAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAUSURBVBjTYwABQSCglEENMxgYGAAynwRB8BEAgQAAAABJRU5ErkJggg==")', status.textContent = "Done!";
    // Create download button
    const downloadButton = document.createElement("button");
    downloadButton.textContent = "Download";
    downloadButton.addEventListener("click", () => {
        const downloadLink = document.createElement("a");
        downloadLink.href = p.toDataURL(); // Convert canvas to data URL
        downloadLink.download = "image.png"; // Specify download file name
        downloadLink.click(); // Trigger download
    });
    imageContainer.appendChild(downloadButton);
}
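// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original bundle, never invoked): a
// standalone helper mirroring the alpha-compositing step that predict()
// performs above. It assumes the bundled RawImage class and the tensor
// helpers `mul`/`to` already used in this file; the name `compositeAlpha`
// is illustrative only.
async function compositeAlpha(image, maskTensor) {
    // Scale the model's [0, 1] mask to 0-255, cast to uint8, and resize it
    // back to the source resolution (the processor config above runs the model at 1024x1024).
    const mask = await RawImage.fromTensor(maskTensor.mul(255).to("uint8")).resize(image.width, image.height);
    // Draw the original pixels onto a canvas of the same size.
    const canvas = document.createElement("canvas");
    canvas.width = image.width;
    canvas.height = image.height;
    const ctx = canvas.getContext("2d");
    ctx.drawImage(image.toCanvas(), 0, 0);
    // Overwrite the alpha channel (every 4th byte) with the predicted mask.
    const pixels = ctx.getImageData(0, 0, image.width, image.height);
    for (let i = 0; i < mask.data.length; ++i) pixels.data[4 * i + 3] = mask.data[i];
    ctx.putImageData(pixels, 0, 0);
    return canvas;
}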
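// Editor's sketch (illustrative, never invoked): minimal usage of the bundled
// RawImage API defined above. `demoRawImageUsage` is an editor-chosen name;
// EXAMPLE_URL is the same sample image the demo already uses.
async function demoRawImageUsage() {
    const img = await RawImage.fromURL(EXAMPLE_URL); // fetch and decode via createImageBitmap/OffscreenCanvas
    const small = await img.resize(256, 256);        // resample: 2 (bilinear) by default
    const gray = small.clone().grayscale();          // single-channel copy; `small` itself is left untouched
    return { size: small.size, channels: gray.channels };
}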