/** * @license * Copyright 2023 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("@tensorflow/tfjs-core"),require("seedrandom")):"function"==typeof define&&define.amd?define(["exports","@tensorflow/tfjs-core","seedrandom"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).tf=e.tf||{},e.tf,e.seedrandom)}(this,(function(e,t,n){"use strict";function a(e){var t=Object.create(null);return e&&Object.keys(e).forEach((function(n){if("default"!==n){var a=Object.getOwnPropertyDescriptor(e,n);Object.defineProperty(t,n,a.get?a:{enumerable:!0,get:function(){return e[n]}})}})),t.default=e,t}var s=a(n);function r(e,n){Array.isArray(e)||(e=[e]),e.forEach((e=>{null!=e&&t.util.assert("complex64"!==e.dtype,(()=>`${n} does not support complex64 tensors in the CPU backend.`))}))}const o=t.kernel_impls.whereImpl;class i extends t.KernelBackend{nextDataId(){return i.nextDataId++}constructor(){super(),this.blockSize=48,this.firstUse=!0,this.data=new t.DataStorage(this,t.engine())}write(e,n,a){this.firstUse&&(this.firstUse=!1,t.env().get("IS_NODE")&&t.backend_util.warn("\n============================\nHi, looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, visit https://github.com/tensorflow/tfjs-node for more details. 
\n============================"));const s={id:this.nextDataId()};return this.data.set(s,{values:e,dtype:a,refCount:1}),s}makeTensorInfo(e,n,a){let s;if("string"===n&&null!=a&&a.length>0&&t.util.isString(a[0])){const r=a.map((e=>t.util.encodeString(e)));s=this.write(r,e,n)}else s=this.write(a,e,n);return{dataId:s,shape:e,dtype:n}}refCount(e){if(this.data.has(e)){return this.data.get(e).refCount}return 0}incRef(e){this.data.get(e).refCount++}decRef(e){if(this.data.has(e)){this.data.get(e).refCount--}}move(e,t,n,a,s){this.data.set(e,{values:t,dtype:a,refCount:s})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(e){const{dtype:n,complexTensorInfos:a}=this.data.get(e);if("complex64"===n){const e=this.readSync(a.real.dataId),n=this.readSync(a.imag.dataId);return t.backend_util.mergeRealAndImagArrays(e,n)}return t.util.convertBackendValuesAndArrayBuffer(this.data.get(e).values,n)}bufferSync(e){const n=this.readSync(e.dataId);if("string"===e.dtype)try{const a=n.map((e=>t.util.decodeString(e)));return t.buffer(e.shape,e.dtype,a)}catch(e){throw new Error("Failed to decode encoded string bytes into utf-8")}return t.buffer(e.shape,e.dtype,n)}makeOutput(e,n,a){return t.engine().makeTensorFromTensorInfo(this.makeTensorInfo(n,a,e),this)}disposeData(e,t=!1){if(this.data.has(e)){if(this.data.get(e).refCount--,!t&&this.data.get(e).refCount>0)return!1;const{complexTensorInfos:n}=this.data.get(e);null!=n&&(this.disposeData(n.real.dataId,!0),this.disposeData(n.imag.dataId,!0)),this.data.delete(e)}return!0}disposeIntermediateTensorInfo(e){this.disposeData(e.dataId)}async time(e){const n=t.util.now();e();return{kernelMs:t.util.now()-n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. Due to automatic garbage collection, the true allocated memory may be less."]}}where(e){r([e],"where");const t=this.readSync(e.dataId);return o(e.shape,t)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}}function l(e){const t=new Float32Array(e.length);for(let n=0;n{const{x:n}=e.inputs,a=e.backend;r(n,"abs");let s=new Float32Array(t.util.sizeFromShape(n.shape));return s=l(a.data.get(n.dataId).values),a.makeOutput(s,n.shape,n.dtype)}};function c(e){return(n,a,s,r,o)=>{const i=t.backend_util.assertAndGetBroadcastShape(n,a),l=i.length,u=t.util.computeStrides(i),c=t.util.sizeFromShape(i),d=t.util.getTypedArrayFromDType(o,c),p=n.length,h=a.length,f=t.util.computeStrides(n),m=t.util.computeStrides(a),k=t.backend_util.getBroadcastDims(n,i),g=t.backend_util.getBroadcastDims(a,i);if(k.length+g.length===0)for(let t=0;to[e]=0));const i=t.util.locToIndex(o,p,f),c=a.slice(-h);g.forEach((e=>c[e]=0));const b=t.util.locToIndex(c,h,m);d[n]=e(s[i],r[b])}return[d,i]}}function d(e){const{inputs:t,backend:n}=e,{real:a,imag:s}=t,r=n.data.get(a.dataId).values,o=n.data.get(s.dataId).values,i=n.makeTensorInfo(a.shape,"complex64");return n.data.get(i.dataId).complexTensorInfos={real:n.makeTensorInfo(a.shape,"float32",r),imag:n.makeTensorInfo(s.shape,"float32",o)},i}const p={kernelName:t.Complex,backendName:"cpu",kernelFunc:d};function h(e,n,a="float32"){if("complex64"===a){return d({inputs:{real:h(e,n,"float32"),imag:h(e,n,"float32")},backend:e})}const s=t.util.makeZerosTypedArray(t.util.sizeFromShape(n),a);return e.makeTensorInfo(n,a,s)}function f(e){const{inputs:t,backend:n}=e,{x:a}=t;return n.incRef(a.dataId),{dataId:a.dataId,shape:a.shape,dtype:a.dtype}}const m={kernelName:t.Identity,backendName:"cpu",kernelFunc:f};function 
k(e){const{inputs:t,backend:n}=e,{input:a}=t,s=n.data.get(a.dataId).complexTensorInfos.real,r=n.data.get(s.dataId).values;return n.makeTensorInfo(s.shape,s.dtype,r)}const g={kernelName:t.Real,backendName:"cpu",kernelFunc:k};function b(e,n,a,s){if("int32"===s){return[n,"int32",Int32Array.from(e)]}if("bool"===s){const s=t.util.toTypedArray([0],a),[r,o]=c(((e,t)=>e!==t?1:0))(n,[],e,s,"bool");return[o,"bool",r]}throw new Error(`Error in Cast: failed to cast ${a} to ${s}`)}function I(e){const{inputs:n,backend:a,attrs:s}=e,{x:r}=n,{dtype:o}=s;if("complex64"===o){if("complex64"===r.dtype)return f({inputs:{x:r},backend:a});const e=h(a,r.shape,r.dtype),t=I({inputs:{x:r},backend:a,attrs:{dtype:"float32"}}),n=d({inputs:{real:t,imag:e},backend:a});return a.disposeIntermediateTensorInfo(e),a.disposeIntermediateTensorInfo(t),n}if("complex64"===r.dtype){const e=k({inputs:{input:r},backend:a}),t=I({inputs:{x:e},backend:a,attrs:{dtype:o}});return a.disposeIntermediateTensorInfo(e),t}if(!t.util.hasEncodingLoss(r.dtype,o)){const e=f({inputs:{x:r},backend:a});return{dataId:e.dataId,shape:e.shape,dtype:o}}const i=a.data.get(r.dataId).values,[l,u,c]=b(i,r.shape,r.dtype,o);return a.makeTensorInfo(l,u,c)}const y={kernelName:t.Cast,backendName:"cpu",kernelFunc:I};function S(e,n,a,s){return null==a?({inputs:a,backend:o})=>{const{a:i,b:l}=a,u=o;r([i,l],e);const c=u.data.get(i.dataId).values,d=u.data.get(l.dataId).values,p="string"===i.dtype?t.backend_util.fromUint8ToStringArray(c):c,h="string"===i.dtype?t.backend_util.fromUint8ToStringArray(d):d,f=s||i.dtype,[m,k]=n(i.shape,l.shape,p,h,f);return u.makeTensorInfo(k,f,m)}:({inputs:e,backend:t})=>{const{a:r,b:o}=e,i=t;if("complex64"===r.dtype||"complex64"===o.dtype){const e=I({inputs:{x:r},backend:i,attrs:{dtype:"complex64"}}),t=i.data.get(e.dataId),n=t.complexTensorInfos.real,s=t.complexTensorInfos.imag,l=i.data.get(n.dataId).values,u=i.data.get(s.dataId).values,c=I({inputs:{x:o},backend:i,attrs:{dtype:"complex64"}}),p=i.data.get(c.dataId),h=p.complexTensorInfos.real,f=p.complexTensorInfos.imag,m=i.data.get(h.dataId).values,k=i.data.get(f.dataId).values,[g,b,y]=a(r.shape,o.shape,l,u,m,k),S=i.makeTensorInfo(y,"float32",g),T=i.makeTensorInfo(y,"float32",b),N=d({inputs:{real:S,imag:T},backend:i});return i.disposeIntermediateTensorInfo(e),i.disposeIntermediateTensorInfo(c),i.disposeIntermediateTensorInfo(S),i.disposeIntermediateTensorInfo(T),N}{const e=i.data.get(r.dataId).values,t=i.data.get(o.dataId).values,a=s||r.dtype,[l,u]=n(r.shape,o.shape,e,t,a);return i.makeTensorInfo(u,a,l)}}}function T(e){return(n,a,s,r,o,i)=>{const l=t.backend_util.assertAndGetBroadcastShape(n,a),u=t.util.sizeFromShape(l),c=l.length,d=t.util.computeStrides(l),p=t.util.getTypedArrayFromDType("float32",u),h=t.util.getTypedArrayFromDType("float32",u),f=t.backend_util.getBroadcastDims(n,l),m=t.backend_util.getBroadcastDims(a,l),k=t.backend_util.mergeRealAndImagArrays(s,r),g=t.backend_util.mergeRealAndImagArrays(o,i),b=n.length,I=t.util.computeStrides(n),y=a.length,S=t.util.computeStrides(a);if(f.length+m.length===0)for(let t=0;ts[e]=0));const r=t.util.locToIndex(s,b,I),o=a.slice(-y);m.forEach((e=>o[e]=0));const i=t.util.locToIndex(o,y,S),l=e(k[2*r],k[2*r+1],g[2*i],g[2*i+1]);p[n]=l.real,h[n]=l.imag}return[p,h,l]}}const N=c(((e,t)=>e+t)),x=T(((e,t,n,a)=>({real:e+n,imag:t+a}))),v=S(t.Add,N,x),F={kernelName:t.Add,backendName:"cpu",kernelFunc:v};function w(e,n,a,s,r){const o=t.util.sizeFromShape(s),i=t.util.makeZerosTypedArray(r,a);for(let t=0;t=r||(i[a]+=o>0?n[t]:1)}return i}function 
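/* 2-D bincount helper: for each row, counts occurrences of every value below
   `size` into a [numRows, size] buffer, accumulating weights when a non-empty
   weights buffer is supplied, or writing 1 when binaryOutput is set. */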
M(e,n,a,s=!1){const r=e.shape[0],o=e.shape[1],i=t.buffer([r,a],n.dtype);for(let t=0;t=a||(s?i.set(1,t,o):n.size>0?i.set(i.get(t,o)+n.get(t,r),t,o):i.set(i.get(t,o)+1,t,o))}return i}const A=c(((e,t)=>e&t)),D=S(t.BitwiseAnd,A),E={kernelName:t.BitwiseAnd,backendName:"cpu",kernelFunc:D};function _(e){return(n,a,s)=>{const r=t.util.getArrayFromDType(a,n.length);for(let t=0;t{const{x:l}=s;r(l,e);const u=i,c=u.data.get(l.dataId).values;let d;if("string"===l.dtype){if(!Array.isArray(c))throw new Error("String tensor's value was not an instance of Array");d=t.backend_util.fromUint8ToStringArray(c)}else d=c;const p=a||l.dtype,h=n(d,p,o);return u.makeTensorInfo(l.shape,p,h)}}const W=_((e=>Math.ceil(e))),P=R(t.Ceil,W),C={kernelName:t.Ceil,backendName:"cpu",kernelFunc:P};function H(e,n,a,s){const r=t.util.getArrayFromDType(a,t.util.sizeFromShape(n));if(s&&"string"!==a){let n=0;e.forEach((e=>{const a=t.util.sizeFromShape(e.shape);r.set(e.vals,n),n+=a}))}else{let s=0;e.forEach((e=>{const o="string"===a?t.backend_util.fromUint8ToStringArray(e.vals):e.vals;let i=0;for(let t=0;te===t?1:0)),B=S(t.Equal,O,null,"bool"),$={kernelName:t.Equal,backendName:"cpu",kernelFunc:B},V=_((e=>Math.exp(e))),G=R(t.Exp,V,"float32"),L={kernelName:t.Exp,backendName:"cpu",kernelFunc:G},q=_((e=>Math.expm1(e))),U=R(t.Expm1,q),Z={kernelName:t.Expm1,backendName:"cpu",kernelFunc:U},j=_((e=>Math.floor(e))),K=R(t.Floor,j),Y={kernelName:t.Floor,backendName:"cpu",kernelFunc:K},J=c(((e,t)=>Math.floor(e/t))),Q=S(t.FloorDiv,J,null,"int32"),X={kernelName:t.FloorDiv,backendName:"cpu",kernelFunc:Q};function ee(e,n,a,s,r,o,i,l,u){const c=t.buffer([s,o],a);for(let t=0;t=u/o)throw new Error(`Invalid indices: ${a} does not index into ${l}`);for(let e=0;ee>t?1:0)),ae=S(t.Greater,ne,null,"bool"),se={kernelName:t.Greater,backendName:"cpu",kernelFunc:ae},re=c(((e,t)=>e>=t?1:0)),oe=S(t.GreaterEqual,re,null,"bool"),ie={kernelName:t.GreaterEqual,backendName:"cpu",kernelFunc:oe},le=c(((e,t)=>ee<=t?1:0)),pe=S(t.LessEqual,de,null,"bool"),he={kernelName:t.LessEqual,backendName:"cpu",kernelFunc:pe};function fe(e,n,a){const s=(n-e)/(a-1),r=t.util.makeZerosTypedArray(a,"float32");r[0]=e;for(let e=1;eMath.log(e))),ke=R(t.Log,me),ge={kernelName:t.Log,backendName:"cpu",kernelFunc:ke};function be(e,n,a,s){const r=t.util.getTypedArrayFromDType(s,t.util.sizeFromShape(a));for(let t=0;ts)&&(s=n)}r[t]=s}return r}const Ie=c(((e,t)=>Math.max(e,t))),ye=S(t.Maximum,Ie),Se={kernelName:t.Maximum,backendName:"cpu",kernelFunc:ye},Te=c(((e,t)=>Math.min(e,t))),Ne=S(t.Minimum,Te),xe={kernelName:t.Minimum,backendName:"cpu",kernelFunc:Ne},ve=c(((e,t)=>e*t)),Fe=T(((e,t,n,a)=>({real:e*n-t*a,imag:e*a+t*n}))),we=S(t.Multiply,ve,Fe),Me={kernelName:t.Multiply,backendName:"cpu",kernelFunc:we};function Ae(e,n,a){const s=t.util.createScalarValue(-1,a);return ve([],n,s,e,a)}const De={kernelName:t.Neg,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:a}=t;r(a,"neg");const s=n.data.get(a.dataId).values,[o,i]=Ae(s,a.shape,a.dtype);return n.makeTensorInfo(i,a.dtype,o)}},Ee=c(((e,t)=>e!==t?1:0)),_e=S(t.NotEqual,Ee,null,"bool"),ze={kernelName:t.NotEqual,backendName:"cpu",kernelFunc:_e};function Re(e,n,a,s,r){const o=n.length,i=t.util.sizeFromShape(n),l=t.util.computeStrides(n),u=t.util.computeStrides(r),c=t.util.getTypedArrayFromDType(a,t.util.sizeFromShape(r));for(let n=0;na.disposeIntermediateTensorInfo(e))),a.makeTensorInfo(I,b,k)}};function Oe(e,t,n,a){const s=[];let r=0;const o=t.length-1+n.length,i=new Array(o).fill(null).map((()=>[0]));!function(e,t){for(let 
n=0;ns)throw new Error("Ragged splits must not point past values");for(let e=1;ea[e])throw new Error("Ragged splits must be sorted in ascending order")}}(n,a);let l=1;for(let e=0;e=0){const e=i[s],t=e[e.length-1]-a[o];for(let e=o;e{if(e<0||e>=a){const r=t.util.indexToLoc(s,n.length,t.util.computeStrides(n)).join(",");throw new Error(`indices[${r}] = ${e} is not in [0, ${a})`)}}))}(o,i,n[0][0]-1),0===s.length)throw new Error("params.rank must be nonzero");const u=s[0],{outSplits:c,valueSlices:d,numValues:p}=Oe(o,i,e,u),h=function(e){const n=[];for(let a=0;ar[t]=e))}return n}(c),f=$e(a,s,r,d,p);return[h,f[0],f[1]]}const Ge=2147483647;function Le(e,n,a,s,r,o,i){if(n.length>1)throw new Error("starts must be a scalar or vector");if(r.length>1)throw new Error("limits must be a scalar or vector");if(i.length>1)throw new Error("deltas must be a scalar or vector");const l=0===n.length,u=0===r.length,c=0===i.length,d=[];l||d.push(n[0]),u||d.push(r[0]),c||d.push(i[0]);for(let e=1;e0&&an)i=0;else if(i=Math.ceil(Math.abs((a-n)/r)),i>Ge)throw new Error("Requires ((limit - start) / delta) <= 2147483647");h[t+1]=h[t]+i}const f=h[p],m=t.util.getArrayFromDType(a,f);let k=0;for(let t=0;tn&&(n=t)}return n}static getMaxWidthValueRowID(e){const t=e.length;if(0===t)return 0;let n=0,a=e[0],s=0;for(let r=1;r"Final length of result must be equal to firstDimension.")),r}calculateOutputIndexRowSplit(e,t,n,a){const s=e.length,r=[];for(let o=0;o0&&r.length!==e[s-1])throw new Error("Invalid row split size.");return r}calculateOutputIndexValueRowID(e,t,n,a){const s=e.length,r=[];if(0===s)return[];let o=0,i=e[0];if(i>=t.length)throw new Error(`Got currentValueRowId=${i}, which is not less than ${t.length}`);let l=t[i];r.push(l);for(let u=1;u=0&&(++o,o=t.length)throw new Error(`Got nextValueRowId=${s} which is not less than ${t.length}`);l=t[s]}r.push(l)}if(r.length!==e.length)throw new Error("Invalid row ids.");return r}calculateOutputIndex(e,t,n,a){const s=this.getRowPartitionTensor(e),r=this.getRowPartitionTypeByDimension(e);switch(r){case qe.VALUE_ROWIDS:return this.calculateOutputIndexValueRowID(s,t,n,a);case qe.ROW_SPLITS:if(s.length-1>t.length)throw new Error(`Row partition size is greater than output size: ${s.length-1} > ${t.length}`);return this.calculateOutputIndexRowSplit(s,t,n,a);default:throw new Error(`Unsupported partition type: ${qe[r]}`)}}getFirstDimensionSize(){const e=this.rowPartitionValues[0];if(0===this.rowPartitionTypes.length)throw new Error("No row_partition_types given.");const t=this.rowPartitionTypes[0];switch(t){case qe.FIRST_DIM_SIZE:return e[0];case qe.VALUE_ROWIDS:throw new Error("Cannot handle VALUE_ROWIDS in first dimension.");case qe.ROW_SPLITS:return this.rowPartitionValuesShapes[0][0]-1;default:throw new Error(`Cannot handle type ${qe[t]}`)}}compute(){if(this.rowPartitionValues[0].length<=0)throw new Error("Invalid first partition input. 
Tensor requires at least one element.");const e=this.getFirstDimensionSize(),n=this.calculateOutputSize(e),a=new Array(this.raggedRank+1);a[a.length-1]=1;for(let e=a.length-2;e>=0;--e)a[e]=a[e+1]*n[e+1];const s=je(n,!1),r=t.util.getArrayFromDType(this.valuesDType,t.util.sizeFromShape(s));if(a[0]*n[0]>0){let t=this.calculateFirstParentOutputIndex(e,a[0],n[0]);for(let e=1;e<=this.raggedRank;++e){t=this.calculateOutputIndex(e-1,t,a[e],n[e])}this.setOutput(this.raggedRank,t,r,s)}return[s,r]}setOutput(e,n,a,s){if(0===a.length)return;const r=this.values,o=a;let i=s.slice();i=i.slice(e+1);const l=t.util.sizeFromShape(i),u=n.length;let c=this.defaultValue;if(c.length!==l&&1!==c.length){const e=this.defaultValueShape;t.tidy((()=>{const n=t.reshape(c,e),a=t.broadcastTo(n,i);c=a.dataSync()}))}let d=0,p=0,h=0;for(let e=0;e<=u;++e){let t=e=u){const e=a.length;t=Math.floor(e/l)}if(t>h)if(1===this.defaultValue.length)o.subarray(h*l,t*l).fill(this.defaultValue[0]),h=t;else for(;t>h;){Ze(o.slice(h*l),c,l),++h}t<0?(d=e+1,p=h):(d=e,p=h,h=p+1)}else++h}}}function Ze(e,t,n){for(let a=0;a= 0`);if(a<-1)throw new Error(`Dimension ${a} must be >= -1`);a=-1}n.push(a)}return n}function Ke(e,t,n,a,s,r,o,i,l,u){return new Ue(e,t,n,a,s,r,o,i,l,u).compute()}function Ye(e,n,a,s){if(e===n||e1)return t.util.makeZerosTypedArray(0,s);const r=Math.abs(Math.ceil((n-e)/a)),o=t.util.makeZerosTypedArray(r,s);n1/Math.sqrt(e))),Qe=R(t.Rsqrt,Je),Xe={kernelName:t.Rsqrt,backendName:"cpu",kernelFunc:Qe};function et(e,n,a,s,r,o,i,l,u,c){const d=[s/r,r],p=e.values,h=n.values;if(0===s)return t.buffer(a,n.dtype);const f=u instanceof t.TensorBuffer?u:t.buffer(d,n.dtype);"string"==typeof u||"number"==typeof u?f.values.fill(u):"boolean"==typeof u&&f.values.fill(+u);for(let e=0;e=s/r)throw new Error(`Invalid indices: ${t} does not index into ${a}`);for(let t=0;t1/(1+Math.exp(-e)))),nt=z(t.Sigmoid,(e=>1/(1+Math.exp(-e)))),at={kernelName:t.Sigmoid,backendName:"cpu",kernelFunc:nt};function st(e,n,a,s,r){const o=t.slice_util.isSliceContinous(s,n,a),i=t.util.sizeFromShape(a),l=t.util.computeStrides(s);if(o){const a=t.slice_util.computeFlatOffset(n,l);return"string"===r?e.slice(a,a+i):e.subarray(a,a+i)}const u="string"===r?t.backend_util.fromUint8ToStringArray(e):e,c=t.buffer(s,r,u),d=t.buffer(a,r);for(let e=0;ee+n[t]));d.set(c.get(...a),...t)}return"string"===r?t.backend_util.fromStringArrayToUint8(d.values):d.values}function rt(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{begin:i,size:l}=s;r(o,"slice");const[u,c]=t.slice_util.parseSliceParams(o,i,l);t.slice_util.assertParamsValid(o,u,c);const d=st(a.data.get(o.dataId).values,u,c,o.shape,o.dtype);return a.makeTensorInfo(c,o.dtype,d)}const ot={kernelName:t.Slice,backendName:"cpu",kernelFunc:rt};function it(e,n,a,s,r,o,i){const l=n[0],u=o[0],c=new Array(u),d=new Array(l),p=n[1];if(0===u){if(0!==l)throw new Error(t.backend_util.getSparseFillEmptyRowsIndicesDenseShapeMismatch(l));return[t.util.getArrayFromDType(a,0),[0,p],t.util.getArrayFromDType(r,0),c,d]}let h=!0,f=0;const m=new Array(u).fill(0);for(let n=0;n=u)throw new Error(t.backend_util.getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(n,a,u));++m[a],h=h&&a>=f,f=a}let k=!0;for(let e=0;e0&&(m[e]+=m[e-1])}if(k&&h){const t=e,n=s;for(let e=0;e0){h[p-1]=1;for(let e=p-2;e>=0;--e)h[e]=h[e+1]*s[e+1]}const f=[];if(l>0){f[l-1]=1;for(let e=l-2;e>=0;--e)f[e]=f[e+1]*u[e+1]}const m=t.util.getArrayFromDType(a,i*l);for(let t=0;t0?r[l-1]+1:0;if(d<0)throw new Error(t.backend_util.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());const 
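/* Sparse segment reduction: the output shape is the input shape with dim 0
   replaced by the number of segments; gaps between segment ids are filled with
   the default value, and each segment is summed (and averaged in mean mode). */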
p=n.slice();p[0]=d;const h=p.reduce(((e,t)=>e*t),1),f=t.util.getArrayFromDType(a,h);if(0===l)return d>0&&f.fill(i),[f,p];if(d<=0)throw new Error(t.backend_util.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());let m=0,k=1,g=0,b=r[m];for(;;){let n=0;if(k=n)throw new Error(t.backend_util.getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage())}if(b<0||b>=d)throw new Error(t.backend_util.getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(b,d));b>g&&f.fill(i,g*c,b*c);for(let n=m;n=u[0])throw new Error(t.backend_util.getSparseSegmentReductionIndicesOutOfRangeErrorMessage(n,s[n],u[0]));for(let t=0;tl)break}return gMath.sqrt(e))),dt=z(t.Sqrt,(e=>Math.sqrt(e))),pt={kernelName:t.Sqrt,backendName:"cpu",kernelFunc:dt},ht=c(((e,t)=>{const n=e-t;return n*n})),ft=S(t.SquaredDifference,ht),mt={kernelName:t.SquaredDifference,backendName:"cpu",kernelFunc:ft},kt=_(((e,t)=>{const{pattern:n,replaceGlobal:a,rewrite:s}=t;return e.replace(new RegExp(n,a?"g":""),s)})),gt=R(t.StaticRegexReplace,kt),bt={kernelName:t.StaticRegexReplace,backendName:"cpu",kernelFunc:gt};function It(e,n,a,s){const r=t.buffer(e,n.dtype);for(let e=0;e0?0:o-i);let p=0;p+=l*this.leftPad.length;for(let t=0;te.forEach((e=>h[f++]=e));for(let e=0;e0){m(e[d+c-1]);for(let e=0;e0){let e=n[0];if(0!==e)throw new Error(`First split value must be 0, got ${e}`);for(let t=1;t=e;if(s=s&&n[t]<=a,!s)throw new Error(`Invalid split value ${n[t]}, must be in [${e}, ${a}]`);e=n[t]}if(e!==a)throw new Error(`Last split value must be data size. Expected ${a}, got ${e}`)}const r=s-1,o=t.util.getArrayFromDType("int32",s);if(0===a||0===s){const e=new Array(a);for(let e=0;e<=r;++e)o[e]=0;return[e,o]}o[0]=0;for(let e=1;e<=r;++e){const t=n[e]-n[e-1];let a=0;this.nGramWidths.forEach((e=>{a+=this.getNumNGrams(t,e)})),this.preserveShort&&t>0&&0===a&&(a=1),o[e]=o[e-1]+a}const i=new Array(o[r]);for(let t=0;t{const o=n[t+1]-n[t],l=this.getNumNGrams(o,r);this.createNGrams(e,a,i,s,l,r),s+=l})),this.preserveShort&&s===o[t]){const r=n[t+1]-n[t];if(0===r)continue;const o=r+2*this.padWidth,l=1;this.createNGrams(e,a,i,s,l,o)}}return[i,o]}}function St(e,t,n,a,s,r,o,i){return new yt(n,a,s,r,o,i).compute(e,t)}function Tt(e,t,n,a){if(!e.length)return;if(0===t.length){for(let t=0;te-t)),Ft=T(((e,t,n,a)=>({real:e-n,imag:t-a}))),wt=S(t.Sub,vt,Ft),Mt={kernelName:t.Sub,backendName:"cpu",kernelFunc:wt};function At(e,n){const a=new Array(e.rank);for(let t=0;t{const n=t.value-e.value;return 0===n?e.index-t.index:n};function Et(e,n,a=0,s=e.length-1){for(;s>a;){if(s-a>600){const t=s-a+1,r=n-a+1,o=Math.log(t),i=.5*Math.exp(2*o/3),l=.5*Math.sqrt(o*i*(t-i)/t)*Math.sign(r-t/2);Et(e,n,Math.max(a,Math.floor(n-r*i/t+l)),Math.min(s,Math.floor(n+(t-r)*i/t+l)))}const r=e[n];let o=a,i=s;for(t.util.swap(e,a,n),Dt(e[s],r)>0&&t.util.swap(e,a,s);o0;)i-=1}0===Dt(e[a],r)?t.util.swap(e,a,i):(i+=1,t.util.swap(e,i,s)),i<=n&&(a=i+1),n<=i&&(s=i-1)}}function _t(e,n,a,s,r){const o=n[n.length-1],[i,l]=[e.length/o,o],u=t.util.getTypedArrayFromDType(a,i*s),c=t.util.getTypedArrayFromDType("int32",i*s);for(let t=0;to[t]={value:e,index:t})),s{for(let n=0;nnew i),1);const Wt=z(t.Elu,(e=>e>=0?e:Math.exp(e)-1)),Pt={kernelName:t.Elu,backendName:"cpu",kernelFunc:Wt};function Ct(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{alpha:i}=s;r([o],"leakyRelu");const l=t.util.sizeFromShape(o.shape),u=a.data.get(o.dataId).values,c=t.util.getTypedArrayFromDType("float32",l);for(let e=0;ee<0?t*e:e));function Bt(e){const{inputs:t,backend:n}=e,{x:a,alpha:s}=t;r([a,s],"prelu");const 
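/* Prelu kernel: applies x < 0 ? alpha * x : x elementwise, broadcasting x
   against alpha, and emits a float32 tensor. */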
o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,[l,u]=Ot(a.shape,s.shape,o,i,"float32");return n.makeTensorInfo(u,"float32",l)}const $t={kernelName:t.Prelu,backendName:"cpu",kernelFunc:Bt},Vt=z(t.Relu,(e=>Math.max(0,e))),Gt={kernelName:t.Relu,backendName:"cpu",kernelFunc:Vt},Lt=z(t.Relu6,(e=>Math.min(Math.max(0,e),6))),qt={kernelName:t.Relu6,backendName:"cpu",kernelFunc:Lt};function Ut(e,t,n,a,s){if("linear"===n)return f({inputs:{x:t},backend:e});if("relu"===n)return Vt({inputs:{x:t},backend:e});if("elu"===n)return Wt({inputs:{x:t},backend:e});if("relu6"===n)return Lt({inputs:{x:t},backend:e});if("prelu"===n)return Bt({inputs:{x:t,alpha:a},backend:e});if("leakyrelu"===n)return Ct({inputs:{x:t},backend:e,attrs:{alpha:s}});if("sigmoid"===n)return nt({inputs:{x:t},backend:e});throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}function Zt(e){const{inputs:n,backend:a,attrs:s}=e,{x:r}=n,{shape:o}=s,i=t.util.sizeFromShape(r.shape),l=t.util.inferFromImplicitShape(o,i),u=t.util.sizeFromShape(l);t.util.assert(i===u,(()=>`The new shape (${l}) has ${u} elements and the old shape (${r.shape}) has ${i} elements. The new shape and old shape must have the same number of elements.`)),a.incRef(r.dataId);const c=a.data.get(r.dataId);if(null!=c.complexTensorInfos){const e=c.complexTensorInfos.real,t=c.complexTensorInfos.imag;e.shape=l,t.shape=l}return{dataId:r.dataId,shape:l,dtype:r.dtype}}const jt={kernelName:t.Reshape,backendName:"cpu",kernelFunc:Zt};function Kt(e){const{inputs:n,backend:a,attrs:s}=e,{a:o,b:i}=n,{transposeA:l,transposeB:u}=s;r([o,i],"matMul");const c=o.shape.length,d=i.shape.length,p=l?o.shape[c-2]:o.shape[c-1],h=u?i.shape[d-1]:i.shape[d-2],f=l?o.shape[c-1]:o.shape[c-2],m=u?i.shape[d-2]:i.shape[d-1],k=o.shape.slice(0,-2),g=i.shape.slice(0,-2),b=t.util.sizeFromShape(k),I=t.util.sizeFromShape(g),y=t.broadcast_util.assertAndGetBroadcastShape(o.shape.slice(0,-2),i.shape.slice(0,-2)).concat([f,m]);t.util.assert(p===h,(()=>`Error in matMul: inner shapes (${p}) and (${h}) of Tensors with shapes ${o.shape} and ${i.shape} and transposeA=${l} and transposeB=${u} must match.`));const S=u?[I,m,h]:[I,h,m],T=Zt({inputs:{x:o},backend:a,attrs:{shape:l?[b,p,f]:[b,f,p]}}),N=Zt({inputs:{x:i},backend:a,attrs:{shape:S}}),x=l?T.shape[1]:T.shape[2],v=l?T.shape[2]:T.shape[1],F=u?N.shape[1]:N.shape[2],w=Math.max(b,I),M=a.data.get(T.dataId).values,A=a.data.get(N.dataId).values,D=t.util.computeStrides(T.shape),E=t.util.computeStrides(N.shape),[_,z,R]=l?[D[0],1,D[1]]:[D[0],D[1],1],[W,P,C]=u?[1,E[1],E[0]]:[E[1],1,E[0]],H=v*F,O=t.buffer([w,v,F],T.dtype),B=O.values,$=a.blockSize;for(let e=0;eMath.acos(e))),Xt={kernelName:t.Acos,backendName:"cpu",kernelFunc:Qt},en=z(t.Acosh,(e=>Math.acosh(e))),tn={kernelName:t.Acosh,backendName:"cpu",kernelFunc:en};const nn={kernelName:t.AddN,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a}=e,s=n;r(n,"addN");const o=s.map((e=>a.data.get(e.dataId).values)),i=t.buffer(s[0].shape,s[0].dtype),l=i.values;for(let e=0;en&&(n=s,a=e)}m[e]=a}return d.forEach((e=>a.disposeIntermediateTensorInfo(e))),a.makeTensorInfo(p,"int32",m)}};const on={kernelName:t.ArgMin,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{axis:i}=s;r(o,"argMin");let l=t.util.parseAxisParam(i,o.shape);const u=t.backend_util.getAxesPermutation(l,o.shape.length);let c=o;const 
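/* ArgMin kernel: if the reduction axis is not innermost, transpose first
   (tracking intermediates for disposal), then scan each reduce window for the
   index of the minimum value and output an int32 tensor. */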
d=[];null!=u&&(c=We({inputs:{x:o},backend:a,attrs:{perm:u}}),d.push(c),l=t.backend_util.getInnerMostAxes(l.length,c.shape.length)),l=[l[0]],t.backend_util.assertAxesAreInnerMostDims("argMin",l,c.shape.length);const[p,h]=t.backend_util.computeOutAndReduceShapes(c.shape,l),f=t.util.sizeFromShape(p),m=t.util.makeZerosTypedArray(f,"int32"),k=t.util.sizeFromShape(h),g=a.data.get(c.dataId).values;for(let e=0;ea.disposeIntermediateTensorInfo(e))),a.makeTensorInfo(p,"int32",m)}},ln=z(t.Asin,(e=>Math.asin(e))),un={kernelName:t.Asin,backendName:"cpu",kernelFunc:ln},cn=z(t.Asinh,(e=>Math.asinh(e))),dn={kernelName:t.Asinh,backendName:"cpu",kernelFunc:cn},pn=z(t.Atan,(e=>Math.atan(e))),hn={kernelName:t.Atan,backendName:"cpu",kernelFunc:pn},fn=c(((e,t)=>Math.atan2(e,t))),mn=S(t.Atan2,fn),kn={kernelName:t.Atan2,backendName:"cpu",kernelFunc:mn},gn=z(t.Atanh,(e=>Math.atanh(e))),bn={kernelName:t.Atanh,backendName:"cpu",kernelFunc:gn};function In(e,n,a,s,r,o){const i=r.strideHeight,l=r.strideWidth,u=r.dilationHeight,c=r.dilationWidth,d=r.effectiveFilterHeight,p=r.effectiveFilterWidth,h=r.padInfo.top,f=r.padInfo.left,m="max"===o?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,k=t.buffer(r.outShape,a),g=k.values,b=r.outShape[1]*r.outShape[2]*r.outShape[3],I=r.outShape[2]*r.outShape[3],y=r.outShape[3];for(let t=0;tk?k=a:"avg"===o&&(b+=a,I++)}if(isNaN(k))break}g[N+n*y+t]="avg"===o?b/I:k}}}return k}function yn(e,n,a,s,r=!1,o=!1){const i=t.buffer(s.outShape,"int32"),l=s.strideHeight,u=s.strideWidth,c=s.dilationHeight,d=s.dilationWidth,p=s.effectiveFilterHeight,h=s.effectiveFilterWidth,f=s.padInfo.top,m=s.padInfo.left,k=t.buffer(n,a,e);for(let e=0;ey&&(y=u,S=r?o?((e*s.inHeight+n)*s.inWidth+a)*s.inChannels+t:(n*s.inWidth+a)*s.inChannels+t:i*h+l)}}i.set(S,e,n,l,t)}}return i}function Sn(e,n,a,s,r,o){const i=r.strideDepth,l=r.strideHeight,u=r.strideWidth,c=r.dilationDepth,d=r.dilationHeight,p=r.dilationWidth,h=r.effectiveFilterDepth,f=r.effectiveFilterHeight,m=r.effectiveFilterWidth,k=r.padInfo.front,g=r.padInfo.top,b=r.padInfo.left,I="max"===o?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,y=t.buffer(r.outShape,a),S=y.values,T=r.outShape[1]*r.outShape[2]*r.outShape[3]*r.outShape[4],N=r.outShape[2]*r.outShape[3]*r.outShape[4],x=r.outShape[3]*r.outShape[4],v=r.outShape[4];for(let t=0;tT?T=r:"avg"===o&&(N+=r,x++),isNaN(T))break}if(isNaN(T))break}if(isNaN(T))break}S[g+t]="avg"===o?N/Math.max(x,1):T}}}}return y}const Tn={kernelName:t.AvgPool,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n;r(o,"avgPool");const{filterSize:i,strides:l,pad:u,dimRoundingMode:c}=s;t.util.assert(t.backend_util.eitherStridesOrDilationsAreOne(l,1),(()=>`Error in avgPool: Either strides or dilations must be 1. 
Got strides ${l} and dilations '1'`));const d=t.backend_util.computePool2DInfo(o.shape,i,l,1,u,c);let p;if(1===d.filterWidth&&1===d.filterHeight&&t.util.arraysEqual(d.inShape,d.outShape))p=f({inputs:{x:o},backend:a});else{const e=a.data.get(o.dataId).values,n=t.util.computeStrides(o.shape),s=In(e,o.shape,o.dtype,n,d,"avg");p=a.makeTensorInfo(d.outShape,o.dtype,s.values)}return p}};const Nn={kernelName:t.AvgPool3D,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{filterSize:i,strides:l,pad:u,dimRoundingMode:c,dataFormat:d}=s;r(o,"avgPool3d");const p=t.backend_util.computePool3DInfo(o.shape,i,l,1,u,c,d),h=Sn(a.data.get(o.dataId).values,o.shape,o.dtype,t.util.computeStrides(o.shape),p,"avg");return a.makeTensorInfo(h.shape,"float32",h.values)}};const xn={kernelName:t.AvgPool3DGrad,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{dy:o,input:i}=n,{filterSize:l,strides:u,pad:c,dimRoundingMode:d}=s;r([o,i],"avgPool3DGrad");const p=t.backend_util.computePool3DInfo(i.shape,l,u,1,c,d),h=p.strideDepth,f=p.strideHeight,m=p.strideWidth,k=p.filterDepth,g=p.filterHeight,b=p.filterWidth,I=p.dilationDepth,y=p.dilationHeight,S=p.dilationWidth,T=p.effectiveFilterDepth,N=p.effectiveFilterHeight,x=p.effectiveFilterWidth,v=T-1-p.padInfo.front,F=x-1-p.padInfo.left,w=N-1-p.padInfo.top,M=t.buffer(i.shape,"float32"),A=1/(k*g*b),D=a.bufferSync(o);for(let e=0;e=p.outDepth||Math.floor(a)!==a))for(let n=0;n=p.outHeight||Math.floor(s)!==s))for(let n=0;n=p.outWidth||Math.floor(r)!==r)continue;l+=D.get(e,a,s,r,t)}}}M.set(l*A,e,n,a,s,t)}return a.makeTensorInfo(M.shape,M.dtype,M.values)}};const vn={kernelName:t.AvgPoolGrad,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{dy:o,input:i}=n,l=i;r([o,i],"avgPoolGrad");const{filterSize:u,strides:c,pad:d}=s,p=t.backend_util.computePool2DInfo(l.shape,u,c,1,d),h=p.strideHeight,f=p.strideWidth,m=p.filterHeight,k=p.filterWidth,g=p.dilationHeight,b=p.dilationWidth,I=p.effectiveFilterHeight,y=p.effectiveFilterWidth,S=y-1-p.padInfo.left,T=I-1-p.padInfo.top,N=t.buffer(l.shape,"float32"),x=1/(m*k),v=a.data.get(o.dataId).values,F=t.buffer(o.shape,"float32",v);for(let e=0;e=p.outHeight||Math.floor(a)!==a))for(let n=0;n=p.outWidth||Math.floor(s)!==s)continue;o+=F.get(e,a,s,t)}}N.set(o*x,e,n,a,t)}return a.makeTensorInfo(N.shape,N.dtype,N.values)}};const Fn={kernelName:t.FusedBatchNorm,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o,scale:i,offset:l,mean:u,variance:c}=n;t.util.assert(u.shape.length===c.shape.length,(()=>"Batch normalization gradient requires mean and variance to have equal ranks.")),t.util.assert(null==l||u.shape.length===l.shape.length,(()=>"Batch normalization gradient requires mean and offset to have equal ranks.")),t.util.assert(null==i||u.shape.length===i.shape.length,(()=>"Batch normalization gradient requires mean and scale to have equal ranks.")),r([o,u,c,i,l],"batchNorm");let{varianceEpsilon:d}=s;null==d&&(d=.001);const p=a.data.get(o.dataId).values,h=a.data.get(u.dataId).values,f=a.data.get(c.dataId).values,m=i?a.data.get(i.dataId).values:new Float32Array([1]),k=l?a.data.get(l.dataId).values:new Float32Array([0]),g=new Float32Array(p.length),b=k.length,I=m.length,y=f.length,S=h.length;let T=0,N=0,x=0,v=0;for(let e=0;e=b&&(T=0),N>=S&&(N=0),x>=I&&(x=0),v>=y&&(v=0);return a.makeTensorInfo(o.shape,o.dtype,g)}};const 
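/* BatchToSpaceND: composed from existing kernels as reshape -> transpose ->
   reshape -> slice. Like the other `{kernelName, backendName: 'cpu', kernelFunc}`
   objects in this bundle, the config below is a KernelConfig intended for
   registration via tf.registerKernel. */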
wn={kernelName:t.BatchToSpaceND,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{blockShape:i,crops:l}=s;r([o],"batchToSpaceND");const u=i.reduce(((e,t)=>e*t)),c=t.backend_util.getReshaped(o.shape,i,u),d=t.backend_util.getPermuted(c.length,i.length),p=t.backend_util.getReshapedPermuted(o.shape,i,u),h=t.backend_util.getSliceBeginCoords(l,i.length),f=t.backend_util.getSliceSize(p,l,i.length),m=Zt({inputs:{x:o},backend:a,attrs:{shape:c}}),k=We({inputs:{x:m},backend:a,attrs:{perm:d}}),g=Zt({inputs:{x:k},backend:a,attrs:{shape:p}}),b=rt({inputs:{x:g},backend:a,attrs:{begin:h,size:f}});return a.disposeIntermediateTensorInfo(m),a.disposeIntermediateTensorInfo(k),a.disposeIntermediateTensorInfo(g),b}};const Mn={kernelName:t.Bincount,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{x:s,weights:r}=t,{size:o}=a,i=w(n.data.get(s.dataId).values,n.data.get(r.dataId).values,r.dtype,r.shape,o);return n.makeTensorInfo([o],r.dtype,i)}};const An={kernelName:t.BroadcastArgs,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a}=e,{s0:s,s1:r}=n,o=a.data.get(s.dataId).values,i=a.data.get(r.dataId).values,l=t.backend_util.assertAndGetBroadcastShape(Array.from(o),Array.from(i));return a.makeTensorInfo([l.length],"int32",Int32Array.from(l))}},Dn=z(t.ClipByValue,((e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:e{const{x:n}=e.inputs,a=e.backend,s=new Float32Array(t.util.sizeFromShape(n.shape)),r=a.data.get(n.dataId),o=r.complexTensorInfos.real,i=r.complexTensorInfos.imag,l=a.data.get(o.dataId).values,u=a.data.get(i.dataId).values;for(let e=0;ee.shape));t.backend_util.assertParamsConsistent(i,o);let l=t.backend_util.computeOutShape(n.map((e=>e.shape)),o);if(0===t.util.sizeFromShape(l))return a.makeTensorInfo(l,n[0].dtype,[]);const u=n.filter((e=>t.util.sizeFromShape(e.shape)>0));if(1===u.length)return f({inputs:{x:u[0]},backend:a});if("complex64"===u[0].dtype){const e=u.map((e=>k({inputs:{input:e},backend:a}))),t=u.map((e=>zn({inputs:{input:e},backend:a}))),n=Wn({inputs:e,backend:a,attrs:{axis:o}}),s=Wn({inputs:t,backend:a,attrs:{axis:o}}),r=d({inputs:{real:n,imag:s},backend:a});return e.forEach((e=>a.disposeIntermediateTensorInfo(e))),t.forEach((e=>a.disposeIntermediateTensorInfo(e))),a.disposeIntermediateTensorInfo(n),a.disposeIntermediateTensorInfo(s),r}const c=u.map((e=>{const n=t.util.sizeFromShape(e.shape.slice(o));return Zt({inputs:{x:e},backend:a,attrs:{shape:[-1,n]}})})),p=c.map((e=>({vals:a.data.get(e.dataId).values,shape:e.shape})));l=t.backend_util.computeOutShape(c.map((e=>e.shape)),1);const h=1===c[0].shape[0],m=H(p,l,n[0].dtype,h),g=t.backend_util.computeOutShape(u.map((e=>e.shape)),o),b=a.makeTensorInfo(g,n[0].dtype,m);return c.forEach((e=>a.disposeIntermediateTensorInfo(e))),b}const Pn={kernelName:t.Concat,backendName:"cpu",kernelFunc:Wn};function Cn(e){const{inputs:n,backend:a,attrs:s}=e,{x:o,filter:i}=n,{strides:l,pad:u,dataFormat:c,dilations:d,dimRoundingMode:p}=s;r([o,i],"conv2d");const h=t.backend_util.convertConv2DDataFormat(c),f=t.backend_util.computeConv2DInfo(o.shape,i.shape,l,d,u,p,!1,h),m=f.filterHeight,k=f.filterWidth,g=f.dilationHeight,b=f.dilationWidth,I=f.padInfo.left,y=f.padInfo.top,S="channelsLast"===f.dataFormat,T=new 
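/* Conv2D: output buffer for the convolution result; the nested loops below
   accumulate dot products over the filter window using the precomputed input,
   filter and output strides (channelsLast or channelsFirst). */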
t.TensorBuffer(f.outShape,o.dtype),N=t.util.computeStrides(o.shape),x=t.util.computeStrides(i.shape),v=N[0],F=S?N[1]:N[2],w=S?N[2]:1,M=S?1:N[1],A=T.strides[0],D=S?T.strides[1]:T.strides[2],E=S?T.strides[2]:1,_=S?1:T.strides[1],z=a.data.get(o.dataId).values,R=a.data.get(i.dataId).values,W=T.values;for(let e=0;e=f.inHeight)continue;const r=e*x[0],o=t+n*F;for(let e=0;e=f.inWidth)continue;const s=o+a*w;let i=r+e*x[1];for(let e=0;e=d.inDepth)continue;const r=e*w[0],o=t+n*F[1];for(let e=0;e=d.inHeight)continue;const s=r+e*w[1],i=o+a*F[2];for(let e=0;e=d.inWidth)continue;const r=s+e*w[2],o=i+t*d.inChannels;let l=r;for(let e=0;eMath.cos(e))),qn={kernelName:t.Cos,backendName:"cpu",kernelFunc:Ln},Un=z(t.Cosh,(e=>Math.cosh(e))),Zn={kernelName:t.Cosh,backendName:"cpu",kernelFunc:Un};const jn={kernelName:t.CropAndResize,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{image:r,boxes:o,boxInd:i}=n,{cropSize:l,method:u,extrapolationValue:c}=s,[d,p,h,f]=r.shape,m=o.shape[0],[k,g]=l,b=t.buffer([m,k,g,f],"float32"),I=a.data.get(o.dataId).values,y=a.data.get(i.dataId).values,S=a.data.get(r.dataId).values,T=t.util.computeStrides(r.shape),N=t.util.computeStrides(b.shape);for(let e=0;e=d)continue;const i=k>1?(s-n)*(p-1)/(k-1):0,l=g>1?(r-a)*(h-1)/(g-1):0;for(let t=0;t1?n*(p-1)+t*i:.5*(n+s)*(p-1);if(d<0||d>p-1)for(let n=0;n1?a*(h-1)+u*l:.5*(a+r)*(h-1);if(d<0||d>h-1){for(let n=0;n1?a*(h-1)+n*l:.5*(a+r)*(h-1);if(s<0||s>h-1){for(let a=0;ae+k-t-1:(e,t)=>e+t;for(let e=0;ee+k-t-1:(e,t)=>e+t;for(let e=0;e`Only NHWC dataFormat supported on CPU for depthToSpace. Got ${i}`));const l=r.shape[0],u=r.shape[1],c=r.shape[2],d=r.shape[3],p=u*o,h=c*o,f=d/(o*o),m=a.data.get(r.dataId).values,k=new Float32Array(l*p*h*f);let g=0;for(let e=0;e`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${l} and dilations '${f}'`));const m=t.backend_util.computeConv2DInfo(o.shape,i.shape,l,f,u,d,!0),{filterHeight:k,filterWidth:g,dilationHeight:b,dilationWidth:I,padInfo:y}=m,S=y.left,T=y.top,N=m.outChannels/m.inChannels,x=new t.TensorBuffer(m.outShape,o.dtype),v=a.data.get(o.dataId).values,F=a.data.get(i.dataId).values,w=x.values;for(let e=0;e=m.inHeight)continue;const r=e*h[0],o=t+n*p[1];for(let e=0;e=m.inWidth)continue;const s=r+e*h[1],i=o+a*m.inChannels;let l=t,u=s;for(let e=0;e{const{x:s,filter:r}=e,{strides:o,pad:i,dilations:l}=a,u=n,c=u.data.get(s.dataId).values,d=s.shape.length,p=u.data.get(r.dataId).values,h=r.shape.length,{batchSize:f,inHeight:m,inWidth:k,inChannels:g,outHeight:b,outWidth:I,padInfo:y,strideHeight:S,strideWidth:T,filterHeight:N,filterWidth:x,dilationHeight:v,dilationWidth:F,outShape:w}=t.backend_util.computeDilation2DInfo(s.shape,r.shape,o,i,"NHWC",l),M=t.util.sizeFromShape(w),A=w.length,D=t.util.getArrayFromDType(s.dtype,M);for(let e=0;e=0&&o=0&&fu&&(u=k)}}}D[t.util.locToIndex([e,n,o,l],A,t.util.computeStrides(w))]=u}}}return{dataId:u.write(t.util.toTypedArray(D,s.dtype),w,s.dtype),shape:w,dtype:s.dtype}}},ra={kernelName:t.Dilation2DBackpropFilter,backendName:"cpu",kernelFunc:({inputs:e,backend:n,attrs:a})=>{const{x:s,filter:r,dy:o}=e,{strides:i,pad:l,dilations:u}=a,c=n,d=t.util.toNestedArray(s.shape,c.data.get(s.dataId).values),p=t.util.toNestedArray(r.shape,c.data.get(r.dataId).values),{batchSize:h,inHeight:f,inWidth:m,inChannels:k,outHeight:g,outWidth:b,padInfo:I,strideHeight:y,strideWidth:S,filterHeight:T,filterWidth:N,dilationHeight:x,dilationWidth:v,outShape:F}=t.backend_util.computeDilation2DInfo(s.shape,r.shape,i,l,"NHWC",u);t.util.assert(o.rank===F.length,(()=>`Error in ${t.Dilation2DBackpropFilter}, dy must have the same rank as output ${F.length}, but got ${o.rank}`));const w=t.util.toNestedArray(F,c.data.get(o.dataId).values),M=t.util.makeZerosNestedTypedArray(r.shape,r.dtype);for(let e=0;e=0&&a=0&&uo&&(o=s,i=t,l=n)}}}M[i][l][r]+=w[e][t][a][r]}}}return{dataId:c.write(t.util.toTypedArray(M,s.dtype),r.shape,r.dtype),shape:r.shape,dtype:r.dtype}}},oa={kernelName:t.Dilation2DBackpropInput,backendName:"cpu",kernelFunc:({inputs:e,backend:n,attrs:a})=>{const{x:s,filter:r,dy:o}=e,{strides:i,pad:l,dilations:u}=a,c=n,d=t.util.toNestedArray(s.shape,c.data.get(s.dataId).values),p=t.util.toNestedArray(r.shape,c.data.get(r.dataId).values),{batchSize:h,inHeight:f,inWidth:m,inChannels:k,outHeight:g,outWidth:b,padInfo:I,strideHeight:y,strideWidth:S,filterHeight:T,filterWidth:N,dilationHeight:x,dilationWidth:v,outShape:F}=t.backend_util.computeDilation2DInfo(s.shape,r.shape,i,l,"NHWC",u);t.util.assert(o.rank===F.length,(()=>`Error in ${t.Dilation2DBackpropInput}, dy must have the same rank as output ${F.length}, but got ${o.rank}`));const w=t.util.toNestedArray(F,c.data.get(o.dataId).values),M=t.util.makeZerosNestedTypedArray(s.shape,s.dtype);for(let e=0;e=0&&a=0&&uo&&(o=s,i=a,l=u)}}}M[e][i][l][r]+=w[e][t][a][r]}}}return{dataId:c.write(t.util.toTypedArray(M,s.dtype),s.shape,s.dtype),shape:s.shape,dtype:s.dtype}}};const ia={kernelName:t.Draw,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{image:s}=t,{canvas:r,options:o}=a,{contextOptions:i,imageOptions:l}=o||{},u=(null==l?void 0:l.alpha)||1,c=(null==i?void 0:i.contextType)||"2d";if("2d"!==c)throw new Error(`Context type ${i.contextType} is not supported by the CPU backend.`);const d=r.getContext(c,(null==i?void 0:i.contextAttributes)||{});if(null==d)throw new Error(`Could not get 
the context with ${c} type.`);const[p,h]=s.shape.slice(0,2),f=2===s.shape.length?1:s.shape[2],m=n.data.get(s.dataId).values,k="float32"===s.dtype?255:1,g=new Uint8ClampedArray(h*p*4);for(let e=0;e1)throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${a}.`)}else if("int32"===s.dtype&&(a<0||a>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${a}.`);1===f?(t[0]=a*k,t[1]=a*k,t[2]=a*k):t[n]=a*k}const n=4*e;g[n+0]=Math.round(t[0]),g[n+1]=Math.round(t[1]),g[n+2]=Math.round(t[2]),g[n+3]=Math.round(t[3])}r.width=h,r.height=p;const b=new ImageData(g,h,p);return d.putImageData(b,0,0),s}};function la(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{axis:i,keepDims:l}=s;let u;r(o,"sum"),u="bool"===o.dtype?I({inputs:{x:o},backend:a,attrs:{dtype:"int32"}}):f({inputs:{x:o},backend:a});const c=u.shape.length,d=t.util.parseAxisParam(i,u.shape),p=t.backend_util.getAxesPermutation(d,c);let m=d,k=u;null!=p&&(k=We({inputs:{x:u},backend:a,attrs:{perm:p}}),m=t.backend_util.getInnerMostAxes(m.length,c)),t.backend_util.assertAxesAreInnerMostDims("sum",m,k.shape.length);const[g,b]=t.backend_util.computeOutAndReduceShapes(k.shape,m);let y=h(a,g,t.backend_util.upcastType(k.dtype,"int32"));const S=t.util.sizeFromShape(b),T=a.data.get(y.dataId).values,N=a.data.get(k.dataId).values;for(let e=0;e=0&&(h=la({inputs:{x:h},backend:a,attrs:{axis:c[e]-(i.length-f),keepDims:!1}}),m.push(h)),f--)}for(const e of m)e!==h&&a.disposeIntermediateTensorInfo(e);return h}};const da={kernelName:t.EluGrad,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a}=e,{dy:s,y:o}=n;r([s,o],"eluGrad");const i=new Float32Array(t.util.sizeFromShape(o.shape)),l=a.data.get(o.dataId).values,u=a.data.get(s.dataId).values;for(let e=0;e=0?u[e]:u[e]*(t+1)}return a.makeTensorInfo(o.shape,"float32",i)}},pa=t.backend_util.ERF_P,ha=t.backend_util.ERF_A1,fa=t.backend_util.ERF_A2,ma=t.backend_util.ERF_A3,ka=t.backend_util.ERF_A4,ga=t.backend_util.ERF_A5,ba=z(t.Erf,(e=>{const t=Math.sign(e),n=Math.abs(e),a=1/(1+pa*n);return t*(1-((((ga*a+ka)*a+ma)*a+fa)*a+ha)*a*Math.exp(-n*n))})),Ia={kernelName:t.Erf,backendName:"cpu",kernelFunc:ba};function ya(e){const{inputs:n,backend:a,attrs:s}=e,{input:r}=n,{dim:o}=s,i=r.shape.length,l=r.shape.slice();let u=o;return o<0&&(t.util.assert(-(i+1)<=o,(()=>`Axis must be in the interval [${-(i+1)}, ${i}]`)),u=i+o+1),l.splice(u,0,1),Zt({inputs:{x:r},backend:a,attrs:{shape:l}})}const Sa={kernelName:t.ExpandDims,backendName:"cpu",kernelFunc:ya},Ta=c(((e,t)=>e/t)),Na=S(t.RealDiv,Ta),xa={kernelName:t.RealDiv,backendName:"cpu",kernelFunc:Na};function va(e,n,a){const s=e.shape,r=s[0],o=s[1],i=a.data.get(e.dataId),l=i.complexTensorInfos.real,u=i.complexTensorInfos.imag,c=[r,o],p=t.util.sizeFromShape(c),h=t.util.getTypedArrayFromDType("float32",p),f=t.util.getTypedArrayFromDType("float32",p);for(let e=0;e{const{image:s}=e,r=a,o=t.util.getTypedArrayFromDType(s.dtype,t.util.sizeFromShape(s.shape)),[i,l,u,c]=s.shape,d=r.data.get(s.dataId).values;for(let e=0;e=0&&r=0,(()=>`GatherV2: the index value ${n} is not in [0, ${p-1}]`))}let h=u;null==u&&(h=0);const f=t.util.sizeFromShape(i.shape),m=t.backend_util.segment_util.collectGatherOpShapeInfo(o,i,c,h),k=Zt({inputs:{x:o},backend:a,attrs:{shape:[m.batchSize,m.outerSize,m.dimSize,m.sliceSize]}}),g=Zt({inputs:{x:i},backend:a,attrs:{shape:[m.batchSize,f/m.batchSize]}}),b=[m.batchSize,m.outerSize,f/m.batchSize,m.sliceSize],I=a.bufferSync(g),y=te(a.bufferSync(k),I,b);return 
a.disposeIntermediateTensorInfo(k),a.disposeIntermediateTensorInfo(g),a.makeTensorInfo(m.outputShape,y.dtype,y.values)}};const Pa={kernelName:t.IFFT,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a}=e,{input:s}=n,r=t.util.sizeFromShape(s.shape),o=s.shape[s.shape.length-1],i=Zt({inputs:{x:s},backend:a,attrs:{shape:[r/o,o]}}),l=va(i,!0,a),u=Zt({inputs:{x:l},backend:a,attrs:{shape:s.shape}});return a.disposeIntermediateTensorInfo(i),a.disposeIntermediateTensorInfo(l),u}},Ca=z(t.IsFinite,(e=>Number.isFinite(e)?1:0),"bool"),Ha={kernelName:t.IsFinite,backendName:"cpu",kernelFunc:Ca},Oa=z(t.IsInf,(e=>Math.abs(e)===1/0?1:0),"bool"),Ba={kernelName:t.IsInf,backendName:"cpu",kernelFunc:Oa},$a=z(t.IsNan,(e=>Number.isNaN(e)?1:0),"bool"),Va={kernelName:t.IsNan,backendName:"cpu",kernelFunc:$a};const Ga={kernelName:t.LinSpace,backendName:"cpu",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:a,stop:s,num:r}=n,o=fe(a,s,r);return t.makeTensorInfo([o.length],"float32",o)}},La=z(t.Log1p,(e=>Math.log1p(e))),qa={kernelName:t.Log1p,backendName:"cpu",kernelFunc:La},Ua=c(((e,t)=>e&&t)),Za=S(t.LogicalAnd,Ua,null,"bool"),ja={kernelName:t.LogicalAnd,backendName:"cpu",kernelFunc:Za},Ka=z(t.LogicalNot,(e=>e?0:1),"bool"),Ya={kernelName:t.LogicalNot,backendName:"cpu",kernelFunc:Ka},Ja=c(((e,t)=>e||t)),Qa=S(t.LogicalOr,Ja,null,"bool"),Xa={kernelName:t.LogicalOr,backendName:"cpu",kernelFunc:Qa};const es={kernelName:t.LRN,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{depthRadius:i,bias:l,alpha:u,beta:c}=s;r(o,"LRN");const d=o.shape[3],p=d-1,h=a.data.get(o.dataId).values,f=t.util.sizeFromShape(o.shape),m=new Float32Array(f);function k(e){const t=e%d;let n=e-t+Math.max(0,t-i);const a=e-t+Math.min(t+i,p);let s=0;for(;n<=a;n++){const e=h[n];s+=e*e}return s}for(let e=0;e`Error in maxPool: Either strides or dilations must be 1. 
Got strides ${l} and dilations '1'`));const d=t.backend_util.computePool2DInfo(o.shape,i,l,1,u,c);let p;if(1===d.filterWidth&&1===d.filterHeight&&t.util.arraysEqual(d.inShape,d.outShape))p=f({inputs:{x:o},backend:a});else{const e=a.data.get(o.dataId).values,n=t.util.computeStrides(o.shape),s=In(e,o.shape,o.dtype,n,d,"max");p=a.makeTensorInfo(d.outShape,o.dtype,s.values)}return p}};const rs={kernelName:t.MaxPool3D,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{filterSize:i,strides:l,pad:u,dimRoundingMode:c,dataFormat:d}=s;r(o,"maxPool3d");const p=t.backend_util.computePool3DInfo(o.shape,i,l,1,u,c,d),h=Sn(a.data.get(o.dataId).values,o.shape,o.dtype,t.util.computeStrides(o.shape),p,"max");return a.makeTensorInfo(h.shape,"float32",h.values)}};const os={kernelName:t.MaxPool3DGrad,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{dy:o,input:i}=n,{filterSize:l,strides:u,pad:c,dimRoundingMode:d}=s;r([o,i],"maxPool3DGrad");const p=t.backend_util.computePool3DInfo(i.shape,l,u,1,c,d),h=function(e,n){const a=t.buffer(n.outShape,"int32"),s=n.strideDepth,r=n.strideHeight,o=n.strideWidth,i=n.dilationDepth,l=n.dilationHeight,u=n.dilationWidth,c=n.effectiveFilterDepth,d=n.effectiveFilterHeight,p=n.effectiveFilterWidth,h=n.padInfo.front,f=n.padInfo.top,m=n.padInfo.left;for(let t=0;t=x&&(x=l,v=a*d*p+r*d+i)}}}a.set(v,t,g,s,r,k)}}}return a}(a.bufferSync(i),p),f=p.strideDepth,m=p.strideHeight,k=p.strideWidth,g=p.dilationDepth,b=p.dilationHeight,I=p.dilationWidth,y=p.effectiveFilterDepth,S=p.effectiveFilterHeight,T=p.effectiveFilterWidth,N=y-1-p.padInfo.front,x=T-1-p.padInfo.left,v=S-1-p.padInfo.top,F=t.buffer(i.shape,"float32"),w=a.bufferSync(o);for(let e=0;e=p.outDepth||Math.floor(a)!==a))for(let s=0;s=p.outHeight||Math.floor(r)!==r))for(let o=0;o=p.outWidth||Math.floor(u)!==u)continue;const c=y*S*T-1-h.get(e,a,r,u,t)===n*S*T+s*T+o?1:0;if(0===c)continue;l+=w.get(e,a,r,u,t)*c}}}F.set(l,e,n,a,s,t)}return a.makeTensorInfo(F.shape,F.dtype,F.values)}};const is={kernelName:t.MaxPoolGrad,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{dy:o,input:i,output:l}=n,u=i;r([i,l],"maxPoolGrad");const{filterSize:c,strides:d,pad:p,dimRoundingMode:h}=s,f=t.backend_util.computePool2DInfo(u.shape,c,d,1,p,h),m=a.data.get(u.dataId).values,k=t.buffer(f.outShape,u.dtype,yn(m,u.shape,u.dtype,f).values),g=f.strideHeight,b=f.strideWidth,I=f.dilationHeight,y=f.dilationWidth,S=f.effectiveFilterHeight,T=f.effectiveFilterWidth,N=T-1-f.padInfo.left,x=S-1-f.padInfo.top,v=t.buffer(u.shape,"float32"),F=a.data.get(o.dataId).values,w=t.buffer(o.shape,"float32",F);for(let e=0;e=f.outHeight||Math.floor(a)!==a))for(let s=0;s=f.outWidth||Math.floor(i)!==i)continue;const l=S*T-1-k.get(e,a,i,t)===n*T+s?1:0;if(0===l)continue;o+=w.get(e,a,i,t)*l}}v.set(o,e,n,a,t)}return a.makeTensorInfo(v.shape,v.dtype,v.values)}};const ls={kernelName:t.MaxPoolWithArgmax,backendName:"cpu",kernelFunc:({inputs:e,attrs:n,backend:a})=>{const{x:s}=e,{filterSize:o,strides:i,pad:l,includeBatchInIndex:u}=n,c=a;r(s,"MaxPoolWithArgmax");const d=c.data.get(s.dataId).values,p=t.backend_util.computePool2DInfo(s.shape,o,i,[1,1],l),[h,f]=function(e,n,a,s,r){const o=In(e,0,a,t.util.computeStrides(n),r,"max"),i=yn(e,n,a,r,!0,s);return[o.values,i.values]}(d,s.shape,s.dtype,u,p),m=c.write(h,p.outShape,s.dtype),k=c.write(f,p.outShape,s.dtype);return[{dataId:m,shape:p.outShape,dtype:s.dtype},{dataId:k,shape:p.outShape,dtype:"int32"}]}};const 
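/* Mean kernel: implemented as cast(x, 'float32'), divide by the reduce size,
   then Sum over the requested axes; all intermediates are disposed. */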
us={kernelName:t.Mean,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:r}=n,{axis:o,keepDims:i}=s,l=t.util.parseAxisParam(o,r.shape),u=t.backend_util.computeOutAndReduceShapes(r.shape,l)[1],c=t.util.sizeFromShape(u),d=[],p=a.makeTensorInfo([],"float32",new Float32Array([c]));d.push(p);const h=I({inputs:{x:r},backend:a,attrs:{dtype:"float32"}});d.push(h);const f=Na({inputs:{a:h,b:p},backend:a});d.push(f);const m=la({inputs:{x:f},backend:a,attrs:{axis:o,keepDims:i}});return d.forEach((e=>a.disposeIntermediateTensorInfo(e))),m}};const cs={kernelName:t.Min,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{axis:i,keepDims:l}=s;r(o,"min");const u=t.util.parseAxisParam(i,o.shape);let c=u;const d=t.backend_util.getAxesPermutation(c,o.shape.length);let p=o;null!=d&&(p=We({inputs:{x:o},backend:a,attrs:{perm:d}}),c=t.backend_util.getInnerMostAxes(c.length,o.shape.length)),t.backend_util.assertAxesAreInnerMostDims("min",c,p.shape.length);const[h,f]=t.backend_util.computeOutAndReduceShapes(p.shape,c),m=t.util.sizeFromShape(f),k=t.util.makeZerosTypedArray(t.util.sizeFromShape(h),p.dtype),g=a.data.get(p.dataId).values;for(let e=0;ee[0]+o.shape[t]+e[1])),c=i.map((e=>e[0])),d=i.map(((e,t)=>e[0]+o.shape[t])),p="reflect"===l?0:1,h=a.data.get(o.dataId).values,f=o.shape.length,m=t.util.computeStrides(o.shape),k=t.util.sizeFromShape(u),g=u.length,b=t.util.computeStrides(u),I=t.util.getTypedArrayFromDType(o.dtype,k);for(let e=0;e=d[e]&&(n[e]=2*(d[e]-1)-n[e]+p);n=n.map(((e,t)=>e-c[t]));const a=t.util.locToIndex(n,f,m);I[e]=h[a]}return{dataId:a.write(I,u,o.dtype),shape:u,dtype:o.dtype}}},ps=c(((e,t)=>{const n=e%t;return e<0&&t<0||e>=0&&t>=0?n:(n+t)%t})),hs=S(t.Mod,ps),fs={kernelName:t.Mod,backendName:"cpu",kernelFunc:hs};function ms(e){const{inputs:n,backend:a,attrs:s}=e,{logits:r}=n,{dim:o}=s,i=r.shape.length;let l=o;if(-1===l&&(l=i-1),l!==i-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${i} and dim was ${l}`);const u=t.util.parseAxisParam([l],r.shape),c=ns({inputs:{x:r},backend:a,attrs:{reductionIndices:u,keepDims:!1}}),d=t.backend_util.expandShapeToKeepDim(c.shape,u),p=Zt({inputs:{x:c},backend:a,attrs:{shape:d}}),h=wt({inputs:{a:r,b:p},backend:a}),f=G({inputs:{x:h},backend:a}),m=la({inputs:{x:f},backend:a,attrs:{axis:u,keepDims:!1}}),k=Zt({inputs:{x:m},backend:a,attrs:{shape:d}}),g=Na({inputs:{a:f,b:k},backend:a});return a.disposeIntermediateTensorInfo(c),a.disposeIntermediateTensorInfo(p),a.disposeIntermediateTensorInfo(h),a.disposeIntermediateTensorInfo(f),a.disposeIntermediateTensorInfo(m),a.disposeIntermediateTensorInfo(k),g}const ks={kernelName:t.Softmax,backendName:"cpu",kernelFunc:ms};const gs={kernelName:t.Multinomial,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:o}=e,{logits:i}=n,{numSamples:l,seed:u,normalized:c}=o;r(i,"multinomial");const d=c?i:ms({inputs:{logits:i},backend:a,attrs:{dim:-1}}),p=d.shape[0],h=d.shape[1],f=a.data.get(d.dataId).values,m=[p,l],k=t.util.makeZerosTypedArray(t.util.sizeFromShape(m),"int32");for(let e=0;e=0&&h[e]{t.util.assertShapesMatch(o,e.shape,"All tensors passed to stack must have matching shapes"),t.util.assert(i===e.dtype,(()=>"All tensors passed to stack must have matching dtypes"))}));const l=[],u=Wn({inputs:n.map((e=>{const t=ya({inputs:{input:e},backend:a,attrs:{dim:r}});return l.push(t),t})),backend:a,attrs:{axis:r}});return l.forEach((e=>a.disposeIntermediateTensorInfo(e))),u}const As={kernelName:t.Pack,backendName:"cpu",kernelFunc:Ms};const Ds={kernelName:t.PadV2,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{paddings:i,constantValue:l}=s;r(o,"pad");const u=i.map(((e,t)=>e[0]+o.shape[t]+e[1])),c=i.map((e=>e[0])),d=a.data.get(o.dataId).values,p=t.util.sizeFromShape(o.shape),h=o.shape.length,f=t.util.computeStrides(o.shape),m=t.util.sizeFromShape(u),k=u.length,g=t.util.computeStrides(u),b=t.util.getTypedArrayFromDType(o.dtype,m);0!==l&&b.fill(l);for(let e=0;ee+c[t]));b[t.util.locToIndex(n,k,g)]=d[e]}return{dataId:a.write(b,u,o.dtype),shape:u,dtype:o.dtype}}},Es=c(((e,t)=>Math.pow(e,t))),_s=S(t.Pow,Es),zs={kernelName:t.Pow,backendName:"cpu",kernelFunc:_s};const Rs={kernelName:t.RaggedGather,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{paramsNestedSplits:s,paramsDenseValues:r,indices:o}=t,i=s.map((e=>n.data.get(e.dataId).values)),l=s.map((e=>e.shape)),u=n.data.get(r.dataId).values,c=n.data.get(o.dataId).values,[d,p,h]=Ve(i,l,u,r.shape,r.dtype,c,o.shape),f=d.map((e=>n.makeTensorInfo([e.length],"int32",e))),m=n.makeTensorInfo(h,r.dtype,p);return f.concat([m])}};const Ws={kernelName:t.RaggedRange,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{starts:a,limits:s,deltas:r}=t,o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=n.data.get(r.dataId).values,[u,c]=Le(o,a.shape,a.dtype,i,s.shape,l,r.shape);return[n.makeTensorInfo([u.length],"int32",u),n.makeTensorInfo([c.length],a.dtype,c)]}};const Ps={kernelName:t.RaggedTensorToTensor,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{shape:s,values:r,defaultValue:o,rowPartitionTensors:i}=t,{rowPartitionTypes:l}=a,u=n.data.get(s.dataId).values,c=n.data.get(r.dataId).values,d=n.data.get(o.dataId).values,p=i.map((e=>n.data.get(e.dataId).values)),h=i.map((e=>e.shape)),[f,m]=Ke(u,s.shape,c,r.shape,r.dtype,d,o.shape,p,h,l);return n.makeTensorInfo(f,r.dtype,m)}};const 
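/* Range kernel: delegates to the range helper defined earlier and returns a
   1-D tensor of the requested dtype. */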
Cs={kernelName:t.Range,backendName:"cpu",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:a,stop:s,dtype:r,step:o}=n,i=Ye(a,s,o,r);return t.makeTensorInfo([i.length],r,i)}},Hs=z(t.Reciprocal,(e=>1/e)),Os={kernelName:t.Reciprocal,backendName:"cpu",kernelFunc:Hs};const Bs={kernelName:t.ResizeBilinear,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{images:o}=n,{alignCorners:i,halfPixelCenters:l,size:u}=s;r(o,"resizeBilinear");const c=t.util.computeStrides(o.shape),[d,p]=u,[h,f,m,k]=o.shape,g=a.data.get(o.dataId).values,b=new Float32Array(t.util.sizeFromShape([h,d,p,k])),I=[i&&d>1?f-1:f,i&&p>1?m-1:m],y=[i&&d>1?d-1:d,i&&p>1?p-1:p];let S=0;const T=I[0]/y[0],N=I[1]/y[1];for(let e=0;e1?d-1:d,l&&m>1?p-1:p],b=[l&&f>1?f-1:f,l&&m>1?m-1:m],I=g[0]/b[0],y=g[1]/b[1],S=a.data.get(i.dataId).values;let T=0;for(let e=0;e1?f-1:f,i&&p>1?m-1:m],y=[i&&d>1?d-1:d,i&&p>1?p-1:p],S=I[0]/y[0],T=I[1]/y[1];let N=0;for(let e=0;e1?p-1:p,l&&k>1?h-1:h],y=[l&&m>1?m-1:m,l&&k>1?k-1:k],S=I[0]/y[0],T=I[1]/y[1],N=1/S,x=1/T,v=2*Math.ceil(N)+2,F=2*Math.ceil(x)+2;for(let e=0;e=m)continue;const d=t+u*c[1],f=u*S;if(e===Math.min(p-1,l?Math.round(f):Math.floor(f)))for(let e=0;e=k)continue;const s=d+t*c[2],r=t*T;a===Math.min(h-1,l?Math.round(r):Math.floor(r))&&(o+=b[s+n])}}g[r+n]=o}}}}return a.makeTensorInfo(o.shape,o.dtype,g)}};const Ls={kernelName:t.Reverse,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{dims:i}=s;r(o,"reverse");const l=o.shape.length,u=t.util.parseAxisParam(i,o.shape);if(0===l)return f({inputs:{x:o},backend:a});const c=new t.TensorBuffer(o.shape,o.dtype),d=a.bufferSync(o);for(let e=0;en[e]=o.shape[e]-1-n[e])),c.set(d.get(...n),...t)}return a.makeTensorInfo(c.shape,c.dtype,c.values)}},qs={kernelName:t.RotateWithOffset,backendName:"cpu",kernelFunc:({inputs:e,attrs:n,backend:a})=>{const{image:s}=e,{radians:r,fillValue:o,center:i}=n,l=a,u=t.util.getTypedArrayFromDType(s.dtype,t.util.sizeFromShape(s.shape)),[c,d,p,h]=s.shape,[f,m]=t.backend_util.getImageCenter(i,d,p),k=Math.sin(r),g=Math.cos(r),b=l.data.get(s.dataId).values;for(let e=0;e=0&&y=0&&S{const t=Math.floor(e);return e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2==0?t:t+1})),Zs={kernelName:t.Round,backendName:"cpu",kernelFunc:Us};const js={kernelName:t.ScatterNd,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{indices:r,updates:o}=n,{shape:i}=s,{sliceRank:l,numUpdates:u,sliceSize:c,strides:d,outputSize:p}=t.backend_util.calculateShapes(o,r,i),h=et(a.bufferSync(r),a.bufferSync(o),i,p,c,u,l,d,0,!0);return a.makeTensorInfo(i,h.dtype,h.values)}};function Ks(e,t){let n=0,a=e.length,s=0;for(;n1||1===o.shape.length?1:t.util.sizeFromShape(o.shape.slice(1));for(let e=0;ee>=0?er*e:Xs*(Math.exp(e)-1))),nr={kernelName:t.Selu,backendName:"cpu",kernelFunc:tr},ar=z(t.Sign,(e=>e<0?-1:e>0?1:0)),sr={kernelName:t.Sign,backendName:"cpu",kernelFunc:ar},rr=z(t.Sin,(e=>Math.sin(e))),or={kernelName:t.Sin,backendName:"cpu",kernelFunc:rr},ir=z(t.Sinh,(e=>Math.sinh(e))),lr={kernelName:t.Sinh,backendName:"cpu",kernelFunc:ir},ur=Math.log(1.1920928955078125e-7)+2,cr=z(t.Softplus,(e=>{const t=e>-ur,n=eNumber(e))))),n.makeTensorInfo([m.length],a.dtype,new Int32Array(m))]}};const fr={kernelName:t.SparseReshape,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{inputIndices:a,inputShape:s,newShape:r}=t;if(2!==a.shape.length)throw new Error(`Input indices should be a matrix but received shape\n ${a.shape}`);if(1!==s.shape.length)throw new Error(`Input shape should be a vector but 
received shape\n ${s.shape}`);if(1!==r.shape.length)throw new Error(`Target shape should be a vector but received shape ${r.shape}`);const o=Array.from(n.data.get(s.dataId).values),i=n.data.get(a.dataId).values,l=Array.from(n.data.get(r.dataId).values),[u,c,d]=lt(i,a.shape,a.dtype,o,l);return[n.makeTensorInfo(c,a.dtype,u),n.makeTensorInfo([d.length],r.dtype,new Int32Array(d))]}};const mr={kernelName:t.SparseSegmentMean,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{data:a,indices:s,segmentIds:r}=t;if(a.shape.length<1)throw new Error("Data should be at least 1 dimensional but received scalar");if(1!==s.shape.length)throw new Error(`Indices should be a vector but received shape\n ${s.shape}`);if(1!==r.shape.length)throw new Error(`Segment ids should be a vector but received shape\n ${r.shape}`);if(s.shape[0]!==r.shape[0])throw new Error("segmentIds and indices should have same size.");const o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=n.data.get(r.dataId).values,[u,c]=ut(o,a.shape,a.dtype,i,l,!0);return n.makeTensorInfo(c,a.dtype,u)}};const kr={kernelName:t.SparseSegmentSum,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{data:a,indices:s,segmentIds:r}=t;if(a.shape.length<1)throw new Error("Data should be at least 1 dimensional but received scalar");if(1!==s.shape.length)throw new Error(`Indices should be a vector but received shape\n ${s.shape}`);if(1!==r.shape.length)throw new Error(`Segment ids should be a vector but received shape\n ${r.shape}`);if(s.shape[0]!==r.shape[0])throw new Error("segmentIds and indices should have same size.");const o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=n.data.get(r.dataId).values,[u,c]=ut(o,a.shape,a.dtype,i,l);return n.makeTensorInfo(c,a.dtype,u)}};const gr={kernelName:t.SparseToDense,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{sparseIndices:r,sparseValues:o,defaultValue:i}=n,{outputShape:l}=s,{sliceRank:u,numUpdates:c,sliceSize:d,strides:p,outputSize:h}=t.backend_util.calculateShapes(o,r,l),f=!1,m=a.bufferSync(r);let k;switch(o.dtype){case"bool":k=et(m,a.bufferSync(o),l,h,d,c,u,p,Boolean(a.data.get(i.dataId).values[0]),f);break;case"float32":k=et(m,a.bufferSync(o),l,h,d,c,u,p,a.data.get(i.dataId).values[0],f);break;case"int32":k=et(m,a.bufferSync(o),l,h,d,c,u,p,a.data.get(i.dataId).values[0],f);break;case"string":k=et(m,a.bufferSync(o),l,h,d,c,u,p,t.util.decodeString(a.data.get(i.dataId).values[0]),f);break;default:throw new Error(`Unsupported type ${o.dtype}`)}return a.makeTensorInfo(l,k.dtype,k.values)}};const br={kernelName:t.SplitV,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:r}=n,{numOrSizeSplits:o,axis:i}=s,l=t.util.parseAxisParam(i,r.shape)[0],u=t.backend_util.prepareSplitSize(r,o,l),c=new Array(r.shape.length).fill(0),d=r.shape.slice();return u.map((e=>{const t=[...d];t[l]=e;const n=rt({inputs:{x:r},backend:a,attrs:{begin:c,size:t}});return c[l]+=e,n}))}},Ir={kernelName:t.Square,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,a=t;r(n,"square");const s=a.data.get(n.dataId).values,o=new Float32Array(s.length);for(let e=0;e{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha})),Sr={kernelName:t.Step,backendName:"cpu",kernelFunc:yr};const 
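// StridedSlice kernel (declared just below, together with the StringNGrams, StringSplit,
// StringToHashBucketFast, Tan, Tanh, TensorScatterUpdate, Tile and TopK kernels):
// slice_util.sliceInfo from tfjs-core resolves begin/end/strides together with the
// begin/end/ellipsis/newAxis/shrinkAxis masks, and the output is produced either as a
// plain reshape (identity case), as slice + reshape (simple contiguous case), or by a
// general strided gather into a fresh buffer.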
Tr={kernelName:t.StridedSlice,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a,attrs:s}=e,{x:o}=n,{begin:i,end:l,strides:u,beginMask:c,endMask:d,ellipsisMask:p,newAxisMask:h,shrinkAxisMask:f}=s;r(o,"stridedSlice");const{finalShapeSparse:m,finalShape:k,isIdentity:g,sliceDim0:b,isSimpleSlice:I,begin:y,end:S,strides:T}=t.slice_util.sliceInfo(o.shape,i,l,u,c,d,p,h,f);let N;if(g)N=Zt({inputs:{x:o},backend:a,attrs:{shape:k}});else if(b||I){t.util.assert(o.shape.length>=1,(()=>`Input must have rank at least 1, got: ${o.shape.length}`));const e=t.slice_util.computeOutShape(y,S,T),n=rt({inputs:{x:o},backend:a,attrs:{begin:y,size:e}});N=Zt({inputs:{x:n},backend:a,attrs:{shape:k}}),a.disposeIntermediateTensorInfo(n)}else{const e=It(m,a.bufferSync(o),T,y);N=a.makeTensorInfo(k,e.dtype,e.values)}return N}};const Nr={kernelName:t.StringNGrams,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{separator:s,nGramWidths:r,leftPad:o,rightPad:i,padWidth:l,preserveShortSequences:u}=a,{data:c,dataSplits:d}=t,p=n.data.get(c.dataId).values,h=n.data.get(d.dataId).values,[f,m]=St(p,h,s,r,o,i,l,u);return[n.makeTensorInfo([f.length],"string",f),n.makeTensorInfo(d.shape,"int32",m)]}};const xr={kernelName:t.StringSplit,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{skipEmpty:s}=a,{input:r,delimiter:o}=t;if("string"!==r.dtype)throw new Error("Input must be of datatype string");if(1!==r.shape.length)throw new Error(`Input must be a vector, got shape: ${r.shape}`);if(0!==o.shape.length)throw new Error(`Delimiter must be a scalar, got shape: ${o.shape}`);const i=n.data.get(r.dataId).values,l=n.data.get(o.dataId).values[0],[u,c,d]=Nt(i,l,s),p=c.length;return[n.makeTensorInfo([p,2],"int32",u),n.makeTensorInfo([p],"string",c),n.makeTensorInfo([2],"int32",new Int32Array(d))]}};const vr={kernelName:t.StringToHashBucketFast,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{numBuckets:s}=a,{input:r}=t;if("string"!==r.dtype)throw new Error("Input must be of datatype string");if(s<=0)throw new Error("Number of buckets must be at least 1");const o=xt(n.data.get(r.dataId).values,s);return n.makeTensorInfo(r.shape,"int32",o)}},Fr=z(t.Tan,(e=>Math.tan(e))),wr={kernelName:t.Tan,backendName:"cpu",kernelFunc:Fr},Mr=z(t.Tanh,(e=>Math.tanh(e))),Ar={kernelName:t.Tanh,backendName:"cpu",kernelFunc:Mr};const Dr={kernelName:t.TensorScatterUpdate,backendName:"cpu",kernelFunc:function(e){const{inputs:n,backend:a}=e,{tensor:s,indices:r,updates:o}=n,{sliceRank:i,numUpdates:l,sliceSize:u,strides:c,outputSize:d}=t.backend_util.calculateShapes(o,r,s.shape),p=a.bufferSync(r),h=a.bufferSync(o),f=a.bufferSync(s),m=et(p,h,s.shape,d,u,l,i,c,f,!1);return a.makeTensorInfo(s.shape,m.dtype,m.values)}};const Er={kernelName:t.Tile,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{x:s}=t,{reps:o}=a;r(s,"tile");const i=At(n.bufferSync(s),o);return n.makeTensorInfo(i.shape,i.dtype,i.values)}};const _r={kernelName:t.TopK,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{x:s}=t,{k:o,sorted:i}=a;r(s,"topk");const l=n.data.get(s.dataId).values,[u,c]=_t(l,s.shape,s.dtype,o,i);return[n.makeTensorInfo(u.shape,u.dtype,u.values),n.makeTensorInfo(c.shape,c.dtype,c.values)]}};const 
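// Transform kernel (declared just below): applies a projective transform (8 parameters
// per image, shared across the batch when a single transform is supplied) to each image,
// sampling with 'nearest' or 'bilinear' interpolation; out-of-range reads are resolved
// per fillMode ('constant' writes fillValue, while 'reflect', 'wrap' and 'nearest' remap
// the source coordinate). The array that follows then collects every kernel config in
// this bundle, registers each one with tfjs-core via registerKernel, and exports
// MathBackendCPU, the shared kernel impls and version_cpu.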
zr={kernelName:t.Transform,backendName:"cpu",kernelFunc:function(e){const{inputs:n,attrs:a,backend:s}=e,{image:r,transforms:o}=n,{interpolation:i,fillMode:l,fillValue:u,outputShape:c}=a,[d,p,h,f]=r.shape,[m,k]=null!=c?c:[p,h],g=[d,m,k,f],b=t.util.computeStrides(r.shape),I=b[0],y=b[1],S=b[2],T=t.util.computeStrides(g),N=T[0],x=T[1],v=T[2],F=t.util.getTypedArrayFromDType(r.dtype,t.util.sizeFromShape(g));F.fill(u);const w=s.data.get(r.dataId).values,M=s.data.get(o.dataId).values;for(let e=0;en-1)if(n<=1)a=0;else{const e=2*n;a-=e*Math.trunc(a/e),a>=n&&(a=e-a-1)}return t.util.clamp(0,a,n-1)}(e,n);case"wrap":return function(e,n){let a=e;if(a<0)if(n<=1)a=0;else{const e=n-1;a+=n*(Math.trunc(-a/e)+1)}else if(a>n-1)if(n<=1)a=0;else{const e=n-1;a-=n*Math.trunc(a/e)}return t.util.clamp(0,a,n-1)}(e,n);case"nearest":return function(e,n){return t.util.clamp(0,e,n-1)}(e,n);default:return function(e,t){return e}(e)}}function Wr(e,t,n,a,s,r,o,i,l,u,c){return 0<=i&&ia.disposeIntermediateTensorInfo(e))),h}},$r=[Jt,u,Xt,tn,F,nn,an,sn,rn,on,un,dn,hn,kn,bn,Tn,Nn,xn,vn,Yt,Fn,wn,Mn,E,An,y,C,En,p,_n,Pn,Hn,On,Bn,$n,Vn,Gn,qn,Zn,jn,Kn,Yn,Jn,Qn,ea,ta,na,aa,sa,ra,oa,ia,ca,Pt,da,$,Ia,L,Sa,Z,Ma,Da,Ea,Y,X,_a,za,Ra,Wa,se,ie,m,Pa,Rn,Ha,Ba,Va,Ht,ce,he,Ga,ge,qa,ja,Ya,Xa,es,ts,as,Se,ss,rs,os,is,ls,us,cs,xe,ds,fs,gs,Me,De,Is,Ss,Ns,ze,xs,ws,As,Ds,zs,$t,He,Rs,Ws,Ps,Cs,g,xa,Os,Gt,qt,jt,Bs,$s,Vs,Gs,Ls,qs,Zs,Xe,js,Js,Qs,nr,at,sr,or,lr,ot,ks,dr,pr,hr,fr,mr,kr,gr,br,pt,Ir,mt,bt,Sr,Tr,Nr,xr,vr,Mt,ua,wr,Ar,Dr,Er,_r,zr,Pe,Hr,Or,Br,Fs];for(const e of $r)t.registerKernel(e);e.MathBackendCPU=i,e.shared=Rt,e.version_cpu="4.15.0"})); //# sourceMappingURL=tf-backend-cpu.es2017.min.js.map
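/*
 * Usage sketch, assuming @tensorflow/tfjs-core is loaded alongside this bundle and the
 * 'cpu' backend has been registered (as the backend-cpu package normally does); the
 * kernels registered above then back ordinary tfjs ops on plain JavaScript TypedArrays:
 *
 *   await tf.setBackend('cpu');
 *   await tf.ready();
 *   tf.tensor2d([[1, 2], [3, 4]]).square().print();   // handled by MathBackendCPU
 */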