/** * @license * Copyright 2023 Google LLC. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ============================================================================= */ import{util as e,kernel_impls as t,KernelBackend as n,DataStorage as a,engine as s,env as r,backend_util as o,buffer as i,Abs as l,Complex as d,Identity as c,Real as p,Cast as u,Add as h,BitwiseAnd as f,Ceil as m,Equal as g,Exp as k,Expm1 as I,Floor as b,FloorDiv as y,Greater as N,GreaterEqual as T,Less as x,LessEqual as S,Log as v,Maximum as F,Minimum as w,Multiply as M,Neg as A,NotEqual as D,Transpose as E,upcastType as z,Prod as W,tidy as R,reshape as P,broadcastTo as H,Rsqrt as C,TensorBuffer as $,Sigmoid as O,slice_util as V,Slice as _,Sqrt as G,SquaredDifference as B,StaticRegexReplace as L,Sub as q,registerBackend as U,Elu as Z,LeakyRelu as K,Prelu as j,Relu as Y,Relu6 as J,Reshape as Q,BatchMatMul as X,broadcast_util as ee,_FusedMatMul as te,Acos as ne,Acosh as ae,AddN as se,All as re,Any as oe,ArgMax as ie,ArgMin as le,Asin as de,Asinh as ce,Atan as pe,Atan2 as ue,Atanh as he,AvgPool as fe,AvgPool3D as me,AvgPool3DGrad as ge,AvgPoolGrad as ke,FusedBatchNorm as Ie,BatchToSpaceND as be,Bincount as ye,BroadcastArgs as Ne,ClipByValue as Te,ComplexAbs as xe,Imag as Se,Concat as ve,Conv2D as Fe,Conv2DBackpropFilter as we,Conv2DBackpropInput as Me,Conv3D as Ae,Conv3DBackpropFilterV2 as De,Conv3DBackpropInputV2 as Ee,Cos as ze,Cosh as We,CropAndResize as Re,Cumprod as Pe,Cumsum as He,DenseBincount as Ce,DepthToSpace as $e,DepthwiseConv2dNative as Oe,DepthwiseConv2dNativeBackpropFilter as Ve,DepthwiseConv2dNativeBackpropInput as _e,Diag as Ge,Dilation2D as Be,Dilation2DBackpropFilter as Le,Dilation2DBackpropInput as qe,Draw as Ue,Sum as Ze,Einsum as Ke,EluGrad as je,Erf as Ye,ExpandDims as Je,RealDiv as Qe,FFT as Xe,Fill as et,FlipLeftRight as tt,FusedConv2D as nt,FusedDepthwiseConv2D as at,GatherNd as st,GatherV2 as rt,IFFT as ot,IsFinite as it,IsInf as lt,IsNan as dt,LinSpace as ct,Log1p as pt,LogicalAnd as ut,LogicalNot as ht,LogicalOr as ft,LRN as mt,LRNGrad as gt,Max as kt,MaxPool as It,MaxPool3D as bt,MaxPool3DGrad as yt,MaxPoolGrad as Nt,MaxPoolWithArgmax as Tt,Mean as xt,Min as St,MirrorPad as vt,Mod as Ft,Softmax as wt,Multinomial as Mt,NonMaxSuppressionV3 as At,NonMaxSuppressionV4 as Dt,NonMaxSuppressionV5 as Et,OneHot as zt,ZerosLike as Wt,OnesLike as Rt,Pack as Pt,PadV2 as Ht,Pow as Ct,RaggedGather as $t,RaggedRange as Ot,RaggedTensorToTensor as Vt,Range as _t,Reciprocal as Gt,ResizeBilinear as Bt,ResizeBilinearGrad as Lt,ResizeNearestNeighbor as qt,ResizeNearestNeighborGrad as Ut,Reverse as Zt,RotateWithOffset as Kt,Round as jt,ScatterNd as Yt,SearchSorted as Jt,Select as Qt,Selu as Xt,Sign as en,Sin as tn,Sinh as nn,Softplus as an,SpaceToBatchND as sn,SparseFillEmptyRows as rn,SparseReshape as on,SparseSegmentMean as ln,SparseSegmentSum as dn,SparseToDense as cn,SplitV as pn,Square as un,Step as hn,StridedSlice as fn,StringNGrams as mn,StringSplit as gn,StringToHashBucketFast as kn,Tan as In,Tanh as 
bn,TensorScatterUpdate as yn,Tile as Nn,TopK as Tn,Transform as xn,Unique as Sn,Unpack as vn,UnsortedSegmentSum as Fn,registerKernel as wn}from"@tensorflow/tfjs-core";import*as Mn from"seedrandom";function An(t,n){Array.isArray(t)||(t=[t]),t.forEach((t=>{null!=t&&e.assert("complex64"!==t.dtype,(()=>`${n} does not support complex64 tensors in the CPU backend.`))}))}const Dn=t.whereImpl;class En extends n{nextDataId(){return En.nextDataId++}constructor(){super(),this.blockSize=48,this.firstUse=!0,this.data=new a(this,s())}write(e,t,n){this.firstUse&&(this.firstUse=!1,r().get("IS_NODE")&&o.warn("\n============================\nHi, looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, visit https://github.com/tensorflow/tfjs-node for more details. \n============================"));const a={id:this.nextDataId()};return this.data.set(a,{values:e,dtype:n,refCount:1}),a}makeTensorInfo(t,n,a){let s;if("string"===n&&null!=a&&a.length>0&&e.isString(a[0])){const r=a.map((t=>e.encodeString(t)));s=this.write(r,t,n)}else s=this.write(a,t,n);return{dataId:s,shape:t,dtype:n}}refCount(e){if(this.data.has(e)){return this.data.get(e).refCount}return 0}incRef(e){this.data.get(e).refCount++}decRef(e){if(this.data.has(e)){this.data.get(e).refCount--}}move(e,t,n,a,s){this.data.set(e,{values:t,dtype:a,refCount:s})}numDataIds(){return this.data.numDataIds()}async read(e){return this.readSync(e)}readSync(t){const{dtype:n,complexTensorInfos:a}=this.data.get(t);if("complex64"===n){const e=this.readSync(a.real.dataId),t=this.readSync(a.imag.dataId);return o.mergeRealAndImagArrays(e,t)}return e.convertBackendValuesAndArrayBuffer(this.data.get(t).values,n)}bufferSync(t){const n=this.readSync(t.dataId);if("string"===t.dtype)try{const a=n.map((t=>e.decodeString(t)));return i(t.shape,t.dtype,a)}catch(e){throw new Error("Failed to decode encoded string bytes into utf-8")}return i(t.shape,t.dtype,n)}makeOutput(e,t,n){return s().makeTensorFromTensorInfo(this.makeTensorInfo(t,n,e),this)}disposeData(e,t=!1){if(this.data.has(e)){if(this.data.get(e).refCount--,!t&&this.data.get(e).refCount>0)return!1;const{complexTensorInfos:n}=this.data.get(e);null!=n&&(this.disposeData(n.real.dataId,!0),this.disposeData(n.imag.dataId,!0)),this.data.delete(e)}return!0}disposeIntermediateTensorInfo(e){this.disposeData(e.dataId)}async time(t){const n=e.now();t();return{kernelMs:e.now()-n}}memory(){return{unreliable:!0,reasons:["The reported memory is an upper bound. 
Due to automatic garbage collection, the true allocated memory may be less."]}}where(e){An([e],"where");const t=this.readSync(e.dataId);return Dn(e.shape,t)}dispose(){}floatPrecision(){return 32}epsilon(){return super.epsilon()}}function zn(e){const t=new Float32Array(e.length);for(let n=0;n{const{x:n}=t.inputs,a=t.backend;An(n,"abs");let s=new Float32Array(e.sizeFromShape(n.shape));return s=zn(a.data.get(n.dataId).values),a.makeOutput(s,n.shape,n.dtype)}};function Rn(t){return(n,a,s,r,i)=>{const l=o.assertAndGetBroadcastShape(n,a),d=l.length,c=e.computeStrides(l),p=e.sizeFromShape(l),u=e.getTypedArrayFromDType(i,p),h=n.length,f=a.length,m=e.computeStrides(n),g=e.computeStrides(a),k=o.getBroadcastDims(n,l),I=o.getBroadcastDims(a,l);if(k.length+I.length===0)for(let e=0;eo[e]=0));const i=e.locToIndex(o,h,m),l=a.slice(-f);I.forEach((e=>l[e]=0));const p=e.locToIndex(l,f,g);u[n]=t(s[i],r[p])}return[u,l]}}function Pn(e){const{inputs:t,backend:n}=e,{real:a,imag:s}=t,r=n.data.get(a.dataId).values,o=n.data.get(s.dataId).values,i=n.makeTensorInfo(a.shape,"complex64");return n.data.get(i.dataId).complexTensorInfos={real:n.makeTensorInfo(a.shape,"float32",r),imag:n.makeTensorInfo(s.shape,"float32",o)},i}const Hn={kernelName:d,backendName:"cpu",kernelFunc:Pn};function Cn(t,n,a="float32"){if("complex64"===a){return Pn({inputs:{real:Cn(t,n,"float32"),imag:Cn(t,n,"float32")},backend:t})}const s=e.makeZerosTypedArray(e.sizeFromShape(n),a);return t.makeTensorInfo(n,a,s)}function $n(e){const{inputs:t,backend:n}=e,{x:a}=t;return n.incRef(a.dataId),{dataId:a.dataId,shape:a.shape,dtype:a.dtype}}const On={kernelName:c,backendName:"cpu",kernelFunc:$n};function Vn(e){const{inputs:t,backend:n}=e,{input:a}=t,s=n.data.get(a.dataId).complexTensorInfos.real,r=n.data.get(s.dataId).values;return n.makeTensorInfo(s.shape,s.dtype,r)}const _n={kernelName:p,backendName:"cpu",kernelFunc:Vn};function Gn(t,n,a,s){if("int32"===s){return[n,"int32",Int32Array.from(t)]}if("bool"===s){const s=e.toTypedArray([0],a),[r,o]=Rn(((e,t)=>e!==t?1:0))(n,[],t,s,"bool");return[o,"bool",r]}throw new Error(`Error in Cast: failed to cast ${a} to ${s}`)}function Bn(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{dtype:o}=s;if("complex64"===o){if("complex64"===r.dtype)return $n({inputs:{x:r},backend:a});const e=Cn(a,r.shape,r.dtype),t=Bn({inputs:{x:r},backend:a,attrs:{dtype:"float32"}}),n=Pn({inputs:{real:t,imag:e},backend:a});return a.disposeIntermediateTensorInfo(e),a.disposeIntermediateTensorInfo(t),n}if("complex64"===r.dtype){const e=Vn({inputs:{input:r},backend:a}),t=Bn({inputs:{x:e},backend:a,attrs:{dtype:o}});return a.disposeIntermediateTensorInfo(e),t}if(!e.hasEncodingLoss(r.dtype,o)){const e=$n({inputs:{x:r},backend:a});return{dataId:e.dataId,shape:e.shape,dtype:o}}const i=a.data.get(r.dataId).values,[l,d,c]=Gn(i,r.shape,r.dtype,o);return a.makeTensorInfo(l,d,c)}const Ln={kernelName:u,backendName:"cpu",kernelFunc:Bn};function qn(e,t,n,a){return null==n?({inputs:n,backend:s})=>{const{a:r,b:i}=n,l=s;An([r,i],e);const d=l.data.get(r.dataId).values,c=l.data.get(i.dataId).values,p="string"===r.dtype?o.fromUint8ToStringArray(d):d,u="string"===r.dtype?o.fromUint8ToStringArray(c):c,h=a||r.dtype,[f,m]=t(r.shape,i.shape,p,u,h);return l.makeTensorInfo(m,h,f)}:({inputs:e,backend:s})=>{const{a:r,b:o}=e,i=s;if("complex64"===r.dtype||"complex64"===o.dtype){const 
e=Bn({inputs:{x:r},backend:i,attrs:{dtype:"complex64"}}),t=i.data.get(e.dataId),a=t.complexTensorInfos.real,s=t.complexTensorInfos.imag,l=i.data.get(a.dataId).values,d=i.data.get(s.dataId).values,c=Bn({inputs:{x:o},backend:i,attrs:{dtype:"complex64"}}),p=i.data.get(c.dataId),u=p.complexTensorInfos.real,h=p.complexTensorInfos.imag,f=i.data.get(u.dataId).values,m=i.data.get(h.dataId).values,[g,k,I]=n(r.shape,o.shape,l,d,f,m),b=i.makeTensorInfo(I,"float32",g),y=i.makeTensorInfo(I,"float32",k),N=Pn({inputs:{real:b,imag:y},backend:i});return i.disposeIntermediateTensorInfo(e),i.disposeIntermediateTensorInfo(c),i.disposeIntermediateTensorInfo(b),i.disposeIntermediateTensorInfo(y),N}{const e=i.data.get(r.dataId).values,n=i.data.get(o.dataId).values,s=a||r.dtype,[l,d]=t(r.shape,o.shape,e,n,s);return i.makeTensorInfo(d,s,l)}}}function Un(t){return(n,a,s,r,i,l)=>{const d=o.assertAndGetBroadcastShape(n,a),c=e.sizeFromShape(d),p=d.length,u=e.computeStrides(d),h=e.getTypedArrayFromDType("float32",c),f=e.getTypedArrayFromDType("float32",c),m=o.getBroadcastDims(n,d),g=o.getBroadcastDims(a,d),k=o.mergeRealAndImagArrays(s,r),I=o.mergeRealAndImagArrays(i,l),b=n.length,y=e.computeStrides(n),N=a.length,T=e.computeStrides(a);if(m.length+g.length===0)for(let e=0;es[e]=0));const r=e.locToIndex(s,b,y),o=a.slice(-N);g.forEach((e=>o[e]=0));const i=e.locToIndex(o,N,T),l=t(k[2*r],k[2*r+1],I[2*i],I[2*i+1]);h[n]=l.real,f[n]=l.imag}return[h,f,d]}}const Zn=Rn(((e,t)=>e+t)),Kn=qn(h,Zn,Un(((e,t,n,a)=>({real:e+n,imag:t+a})))),jn={kernelName:h,backendName:"cpu",kernelFunc:Kn};function Yn(t,n,a,s,r){const o=e.sizeFromShape(s),i=e.makeZerosTypedArray(r,a);for(let e=0;e=r||(i[a]+=o>0?n[e]:1)}return i}function Jn(e,t,n,a=!1){const s=e.shape[0],r=e.shape[1],o=i([s,n],t.dtype);for(let i=0;i=n||(a?o.set(1,i,r):t.size>0?o.set(o.get(i,r)+t.get(i,s),i,r):o.set(o.get(i,r)+1,i,r))}return o}const Qn=Rn(((e,t)=>e&t)),Xn={kernelName:f,backendName:"cpu",kernelFunc:qn(f,Qn)};function ea(t){return(n,a,s)=>{const r=e.getArrayFromDType(a,n.length);for(let e=0;e{const{x:i}=a;An(i,e);const l=r,d=l.data.get(i.dataId).values;let c;if("string"===i.dtype){if(!Array.isArray(d))throw new Error("String tensor's value was not an instance of Array");c=o.fromUint8ToStringArray(d)}else c=d;const p=n||i.dtype,u=t(c,p,s);return l.makeTensorInfo(i.shape,p,u)}}const aa=ea((e=>Math.ceil(e))),sa={kernelName:m,backendName:"cpu",kernelFunc:na(m,aa)};function ra(t,n,a,s){const r=e.getArrayFromDType(a,e.sizeFromShape(n));if(s&&"string"!==a){let n=0;t.forEach((t=>{const a=e.sizeFromShape(t.shape);r.set(t.vals,n),n+=a}))}else{let e=0;t.forEach((t=>{const s="string"===a?o.fromUint8ToStringArray(t.vals):t.vals;let i=0;for(let a=0;ae===t?1:0)),ia=qn(g,oa,null,"bool"),la={kernelName:g,backendName:"cpu",kernelFunc:ia},da=ea((e=>Math.exp(e))),ca=na(k,da,"float32"),pa={kernelName:k,backendName:"cpu",kernelFunc:ca},ua=ea((e=>Math.expm1(e))),ha={kernelName:I,backendName:"cpu",kernelFunc:na(I,ua)},fa=ea((e=>Math.floor(e))),ma={kernelName:b,backendName:"cpu",kernelFunc:na(b,fa)},ga=Rn(((e,t)=>Math.floor(e/t))),ka={kernelName:y,backendName:"cpu",kernelFunc:qn(y,ga,null,"int32")};function Ia(e,t,n,a,s,r,o,l,d){const c=i([a,r],n);for(let n=0;n=d/r)throw new Error(`Invalid indices: ${a} does not index into ${l}`);for(let 
e=0;ee>t?1:0)),Na={kernelName:N,backendName:"cpu",kernelFunc:qn(N,ya,null,"bool")},Ta=Rn(((e,t)=>e>=t?1:0)),xa={kernelName:T,backendName:"cpu",kernelFunc:qn(T,Ta,null,"bool")},Sa=Rn(((e,t)=>ee<=t?1:0)),wa={kernelName:S,backendName:"cpu",kernelFunc:qn(S,Fa,null,"bool")};function Ma(t,n,a){const s=(n-t)/(a-1),r=e.makeZerosTypedArray(a,"float32");r[0]=t;for(let e=1;eMath.log(e))),Da={kernelName:v,backendName:"cpu",kernelFunc:na(v,Aa)};function Ea(t,n,a,s){const r=e.getTypedArrayFromDType(s,e.sizeFromShape(a));for(let e=0;es)&&(s=n)}r[e]=s}return r}const za=Rn(((e,t)=>Math.max(e,t))),Wa={kernelName:F,backendName:"cpu",kernelFunc:qn(F,za)},Ra=Rn(((e,t)=>Math.min(e,t))),Pa={kernelName:w,backendName:"cpu",kernelFunc:qn(w,Ra)},Ha=Rn(((e,t)=>e*t)),Ca=Un(((e,t,n,a)=>({real:e*n-t*a,imag:e*a+t*n}))),$a=qn(M,Ha,Ca),Oa={kernelName:M,backendName:"cpu",kernelFunc:$a};function Va(t,n,a){const s=e.createScalarValue(-1,a);return Ha([],n,s,t,a)}const _a={kernelName:A,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{x:a}=t;An(a,"neg");const s=n.data.get(a.dataId).values,[r,o]=Va(s,a.shape,a.dtype);return n.makeTensorInfo(o,a.dtype,r)}},Ga=Rn(((e,t)=>e!==t?1:0)),Ba={kernelName:D,backendName:"cpu",kernelFunc:qn(D,Ga,null,"bool")};function La(t,n,a,s,r){const o=n.length,i=e.sizeFromShape(n),l=e.computeStrides(n),d=e.computeStrides(r),c=e.getTypedArrayFromDType(a,e.sizeFromShape(r));for(let n=0;na.disposeIntermediateTensorInfo(e))),a.makeTensorInfo(b,I,g)}};function ja(e,t,n,a){const s=[];let r=0;const o=t.length-1+n.length,i=new Array(o).fill(null).map((()=>[0]));!function(e,t){for(let n=0;ns)throw new Error("Ragged splits must not point past values");for(let e=1;ea[e])throw new Error("Ragged splits must be sorted in ascending order")}}(n,a);let l=1;for(let e=0;e=0){const e=i[s],t=e[e.length-1]-a[o];for(let e=o;e{if(t<0||t>=a){const r=e.indexToLoc(s,n.length,e.computeStrides(n)).join(",");throw new Error(`indices[${r}] = ${t} is not in [0, ${a})`)}}))}(o,i,n[0][0]-1),0===s.length)throw new Error("params.rank must be nonzero");const d=s[0],{outSplits:c,valueSlices:p,numValues:u}=ja(o,i,t,d),h=function(t){const n=[];for(let a=0;ar[t]=e))}return n}(c),f=Ja(a,s,r,p,u);return[h,f[0],f[1]]}function Xa(t,n,a,s,r,o,i){if(n.length>1)throw new Error("starts must be a scalar or vector");if(r.length>1)throw new Error("limits must be a scalar or vector");if(i.length>1)throw new Error("deltas must be a scalar or vector");const l=0===n.length,d=0===r.length,c=0===i.length,p=[];l||p.push(n[0]),d||p.push(r[0]),c||p.push(i[0]);for(let e=1;e0&&an)i=0;else if(i=Math.ceil(Math.abs((a-n)/r)),i>2147483647)throw new Error("Requires ((limit - start) / delta) <= 2147483647");h[e+1]=h[e]+i}const f=h[u],m=e.getArrayFromDType(a,f);let g=0;for(let e=0;en&&(n=t)}return n}static getMaxWidthValueRowID(e){const t=e.length;if(0===t)return 0;let n=0,a=e[0],s=0;for(let r=1;r"Final length of result must be equal to firstDimension.")),r}calculateOutputIndexRowSplit(e,t,n,a){const s=e.length,r=[];for(let o=0;o0&&r.length!==e[s-1])throw new Error("Invalid row split size.");return r}calculateOutputIndexValueRowID(e,t,n,a){const s=e.length,r=[];if(0===s)return[];let o=0,i=e[0];if(i>=t.length)throw new Error(`Got currentValueRowId=${i}, which is not less than ${t.length}`);let l=t[i];r.push(l);for(let d=1;d=0&&(++o,o=t.length)throw new Error(`Got nextValueRowId=${s} which is not less than ${t.length}`);l=t[s]}r.push(l)}if(r.length!==e.length)throw new Error("Invalid row ids.");return r}calculateOutputIndex(e,t,n,a){const 
s=this.getRowPartitionTensor(e),r=this.getRowPartitionTypeByDimension(e);switch(r){case es.VALUE_ROWIDS:return this.calculateOutputIndexValueRowID(s,t,n,a);case es.ROW_SPLITS:if(s.length-1>t.length)throw new Error(`Row partition size is greater than output size: ${s.length-1} > ${t.length}`);return this.calculateOutputIndexRowSplit(s,t,n,a);default:throw new Error(`Unsupported partition type: ${es[r]}`)}}getFirstDimensionSize(){const e=this.rowPartitionValues[0];if(0===this.rowPartitionTypes.length)throw new Error("No row_partition_types given.");const t=this.rowPartitionTypes[0];switch(t){case es.FIRST_DIM_SIZE:return e[0];case es.VALUE_ROWIDS:throw new Error("Cannot handle VALUE_ROWIDS in first dimension.");case es.ROW_SPLITS:return this.rowPartitionValuesShapes[0][0]-1;default:throw new Error(`Cannot handle type ${es[t]}`)}}compute(){if(this.rowPartitionValues[0].length<=0)throw new Error("Invalid first partition input. Tensor requires at least one element.");const t=this.getFirstDimensionSize(),n=this.calculateOutputSize(t),a=new Array(this.raggedRank+1);a[a.length-1]=1;for(let e=a.length-2;e>=0;--e)a[e]=a[e+1]*n[e+1];const s=as(n,!1),r=e.getArrayFromDType(this.valuesDType,e.sizeFromShape(s));if(a[0]*n[0]>0){let e=this.calculateFirstParentOutputIndex(t,a[0],n[0]);for(let t=1;t<=this.raggedRank;++t){e=this.calculateOutputIndex(t-1,e,a[t],n[t])}this.setOutput(this.raggedRank,e,r,s)}return[s,r]}setOutput(t,n,a,s){if(0===a.length)return;const r=this.values,o=a;let i=s.slice();i=i.slice(t+1);const l=e.sizeFromShape(i),d=n.length;let c=this.defaultValue;if(c.length!==l&&1!==c.length){const e=this.defaultValueShape;R((()=>{const t=P(c,e),n=H(t,i);c=n.dataSync()}))}let p=0,u=0,h=0;for(let e=0;e<=d;++e){let t=e=d){const e=a.length;t=Math.floor(e/l)}if(t>h)if(1===this.defaultValue.length)o.subarray(h*l,t*l).fill(this.defaultValue[0]),h=t;else for(;t>h;){ns(o.slice(h*l),c,l),++h}t<0?(p=e+1,u=h):(p=e,u=h,h=u+1)}else++h}}}function ns(e,t,n){for(let a=0;a= 0`);if(a<-1)throw new Error(`Dimension ${a} must be >= -1`);a=-1}n.push(a)}return n}function ss(e,t,n,a,s,r,o,i,l,d){return new ts(e,t,n,a,s,r,o,i,l,d).compute()}function rs(t,n,a,s){if(t===n||t1)return e.makeZerosTypedArray(0,s);const r=Math.abs(Math.ceil((n-t)/a)),o=e.makeZerosTypedArray(r,s);n1/Math.sqrt(e))),is={kernelName:C,backendName:"cpu",kernelFunc:na(C,os)};function ls(e,t,n,a,s,r,o,l,d,c){const p=[a/s,s],u=e.values,h=t.values;if(0===a)return i(n,t.dtype);const f=d instanceof $?d:i(p,t.dtype);"string"==typeof d||"number"==typeof d?f.values.fill(d):"boolean"==typeof d&&f.values.fill(+d);for(let e=0;e=a/s)throw new Error(`Invalid indices: ${r} does not index into ${n}`);for(let n=0;n1/(1+Math.exp(-e)))),cs=ta(O,(e=>1/(1+Math.exp(-e)))),ps={kernelName:O,backendName:"cpu",kernelFunc:cs};function us(t,n,a,s,r){const l=V.isSliceContinous(s,n,a),d=e.sizeFromShape(a),c=e.computeStrides(s);if(l){const e=V.computeFlatOffset(n,c);return"string"===r?t.slice(e,e+d):t.subarray(e,e+d)}const p="string"===r?o.fromUint8ToStringArray(t):t,u=i(s,r,p),h=i(a,r);for(let e=0;ee+n[t]));h.set(u.get(...a),...t)}return"string"===r?o.fromStringArrayToUint8(h.values):h.values}function hs(e){const{inputs:t,backend:n,attrs:a}=e,{x:s}=t,{begin:r,size:o}=a;An(s,"slice");const[i,l]=V.parseSliceParams(s,r,o);V.assertParamsValid(s,i,l);const d=us(n.data.get(s.dataId).values,i,l,s.shape,s.dtype);return n.makeTensorInfo(l,s.dtype,d)}const fs={kernelName:_,backendName:"cpu",kernelFunc:hs};function ms(t,n,a,s,r,i,l){const d=n[0],c=i[0],p=new Array(c),u=new 
Array(d),h=n[1];if(0===c){if(0!==d)throw new Error(o.getSparseFillEmptyRowsIndicesDenseShapeMismatch(d));return[e.getArrayFromDType(a,0),[0,h],e.getArrayFromDType(r,0),p,u]}let f=!0,m=0;const g=new Array(c).fill(0);for(let e=0;e=c)throw new Error(o.getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(e,n,c));++g[n],f=f&&n>=m,m=n}let k=!0;for(let e=0;e0&&(g[e]+=g[e-1])}if(k&&f){const e=t,n=s;for(let e=0;e0){f[h-1]=1;for(let e=h-2;e>=0;--e)f[e]=f[e+1]*s[e+1]}const m=[];if(d>0){m[d-1]=1;for(let e=d-2;e>=0;--e)m[e]=m[e+1]*c[e+1]}const g=e.getArrayFromDType(a,l*d);for(let e=0;e0?r[d-1]+1:0;if(u<0)throw new Error(o.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());const h=n.slice();h[0]=u;const f=h.reduce(((e,t)=>e*t),1),m=e.getArrayFromDType(a,f);if(0===d)return u>0&&m.fill(l),[m,h];if(u<=0)throw new Error(o.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());let g=0,k=1,I=0,b=r[g];for(;;){let e=0;if(k=e)throw new Error(o.getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage())}if(b<0||b>=u)throw new Error(o.getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(b,u));b>I&&m.fill(l,I*p,b*p);for(let e=g;e=c[0])throw new Error(o.getSparseSegmentReductionIndicesOutOfRangeErrorMessage(e,s[e],c[0]));for(let e=0;ed)break}return IMath.sqrt(e))),bs={kernelName:G,backendName:"cpu",kernelFunc:ta(G,(e=>Math.sqrt(e)))},ys=Rn(((e,t)=>{const n=e-t;return n*n})),Ns={kernelName:B,backendName:"cpu",kernelFunc:qn(B,ys)},Ts=ea(((e,t)=>{const{pattern:n,replaceGlobal:a,rewrite:s}=t;return e.replace(new RegExp(n,a?"g":""),s)})),xs={kernelName:L,backendName:"cpu",kernelFunc:na(L,Ts)};function Ss(e,t,n,a){const s=i(e,t.dtype);for(let e=0;e0?0:o-i);let u=0;u+=l*this.leftPad.length;for(let t=0;te.forEach((e=>h[f++]=e));for(let e=0;e0){m(e[p+c-1]);for(let e=0;e0){let e=n[0];if(0!==e)throw new Error(`First split value must be 0, got ${e}`);for(let t=1;t=e;if(s=s&&n[t]<=a,!s)throw new Error(`Invalid split value ${n[t]}, must be in [${e}, ${a}]`);e=n[t]}if(e!==a)throw new Error(`Last split value must be data size. 
Expected ${a}, got ${e}`)}const r=s-1,o=e.getArrayFromDType("int32",s);if(0===a||0===s){const e=new Array(a);for(let e=0;e<=r;++e)o[e]=0;return[e,o]}o[0]=0;for(let e=1;e<=r;++e){const t=n[e]-n[e-1];let a=0;this.nGramWidths.forEach((e=>{a+=this.getNumNGrams(t,e)})),this.preserveShort&&t>0&&0===a&&(a=1),o[e]=o[e-1]+a}const i=new Array(o[r]);for(let e=0;e{const o=n[e+1]-n[e],l=this.getNumNGrams(o,r);this.createNGrams(t,a,i,s,l,r),s+=l})),this.preserveShort&&s===o[e]){const r=n[e+1]-n[e];if(0===r)continue;const o=r+2*this.padWidth,l=1;this.createNGrams(t,a,i,s,l,o)}}return[i,o]}}function Fs(e,t,n,a,s,r,o,i){return new vs(n,a,s,r,o,i).compute(e,t)}function ws(e,t,n,a){if(!e.length)return;if(0===t.length){for(let t=0;te-t)),Es=qn(q,Ds,Un(((e,t,n,a)=>({real:e-n,imag:t-a})))),zs={kernelName:q,backendName:"cpu",kernelFunc:Es};function Ws(e,t){const n=new Array(e.rank);for(let a=0;a{const n=t.value-e.value;return 0===n?e.index-t.index:n};function Ps(t,n,a=0,s=t.length-1){for(;s>a;){if(s-a>600){const e=s-a+1,r=n-a+1,o=Math.log(e),i=.5*Math.exp(2*o/3),l=.5*Math.sqrt(o*i*(e-i)/e)*Math.sign(r-e/2);Ps(t,n,Math.max(a,Math.floor(n-r*i/e+l)),Math.min(s,Math.floor(n+(e-r)*i/e+l)))}const r=t[n];let o=a,i=s;for(e.swap(t,a,n),Rs(t[s],r)>0&&e.swap(t,a,s);o0;)i-=1}0===Rs(t[a],r)?e.swap(t,a,i):(i+=1,e.swap(t,i,s)),i<=n&&(a=i+1),n<=i&&(s=i-1)}}function Hs(t,n,a,s,r){const o=n[n.length-1],[l,d]=[t.length/o,o],c=e.getTypedArrayFromDType(a,l*s),p=e.getTypedArrayFromDType("int32",l*s);for(let e=0;eo[t]={value:e,index:t})),s{for(let n=0;nnew En),1);const Vs=ta(Z,(e=>e>=0?e:Math.exp(e)-1)),_s={kernelName:Z,backendName:"cpu",kernelFunc:Vs};function Gs(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{alpha:o}=s;An([r],"leakyRelu");const i=e.sizeFromShape(r.shape),l=a.data.get(r.dataId).values,d=e.getTypedArrayFromDType("float32",i);for(let e=0;ee<0?t*e:e));function qs(e){const{inputs:t,backend:n}=e,{x:a,alpha:s}=t;An([a,s],"prelu");const r=n.data.get(a.dataId).values,o=n.data.get(s.dataId).values,[i,l]=Ls(a.shape,s.shape,r,o,"float32");return n.makeTensorInfo(l,"float32",i)}const Us={kernelName:j,backendName:"cpu",kernelFunc:qs},Zs=ta(Y,(e=>Math.max(0,e))),Ks={kernelName:Y,backendName:"cpu",kernelFunc:Zs},js=ta(J,(e=>Math.min(Math.max(0,e),6))),Ys={kernelName:J,backendName:"cpu",kernelFunc:js};function Js(e,t,n,a,s){if("linear"===n)return $n({inputs:{x:t},backend:e});if("relu"===n)return Zs({inputs:{x:t},backend:e});if("elu"===n)return Vs({inputs:{x:t},backend:e});if("relu6"===n)return js({inputs:{x:t},backend:e});if("prelu"===n)return qs({inputs:{x:t,alpha:a},backend:e});if("leakyrelu"===n)return Gs({inputs:{x:t},backend:e,attrs:{alpha:s}});if("sigmoid"===n)return cs({inputs:{x:t},backend:e});throw new Error(`Activation ${n} has not been implemented for the CPU backend.`)}function Qs(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{shape:o}=s,i=e.sizeFromShape(r.shape),l=e.inferFromImplicitShape(o,i),d=e.sizeFromShape(l);e.assert(i===d,(()=>`The new shape (${l}) has ${d} elements and the old shape (${r.shape}) has ${i} elements. 
The new shape and old shape must have the same number of elements.`)),a.incRef(r.dataId);const c=a.data.get(r.dataId);if(null!=c.complexTensorInfos){const e=c.complexTensorInfos.real,t=c.complexTensorInfos.imag;e.shape=l,t.shape=l}return{dataId:r.dataId,shape:l,dtype:r.dtype}}const Xs={kernelName:Q,backendName:"cpu",kernelFunc:Qs};function er(t){const{inputs:n,backend:a,attrs:s}=t,{a:r,b:o}=n,{transposeA:l,transposeB:d}=s;An([r,o],"matMul");const c=r.shape.length,p=o.shape.length,u=l?r.shape[c-2]:r.shape[c-1],h=d?o.shape[p-1]:o.shape[p-2],f=l?r.shape[c-1]:r.shape[c-2],m=d?o.shape[p-2]:o.shape[p-1],g=r.shape.slice(0,-2),k=o.shape.slice(0,-2),I=e.sizeFromShape(g),b=e.sizeFromShape(k),y=ee.assertAndGetBroadcastShape(r.shape.slice(0,-2),o.shape.slice(0,-2)).concat([f,m]);e.assert(u===h,(()=>`Error in matMul: inner shapes (${u}) and (${h}) of Tensors with shapes ${r.shape} and ${o.shape} and transposeA=${l} and transposeB=${d} must match.`));const N=d?[b,m,h]:[b,h,m],T=Qs({inputs:{x:r},backend:a,attrs:{shape:l?[I,u,f]:[I,f,u]}}),x=Qs({inputs:{x:o},backend:a,attrs:{shape:N}}),S=l?T.shape[1]:T.shape[2],v=l?T.shape[2]:T.shape[1],F=d?x.shape[1]:x.shape[2],w=Math.max(I,b),M=a.data.get(T.dataId).values,A=a.data.get(x.dataId).values,D=e.computeStrides(T.shape),E=e.computeStrides(x.shape),[z,W,R]=l?[D[0],1,D[1]]:[D[0],D[1],1],[P,H,C]=d?[1,E[1],E[0]]:[E[1],1,E[0]],$=v*F,O=i([w,v,F],T.dtype),V=O.values,_=a.blockSize;for(let e=0;eMath.acos(e)))},sr={kernelName:ae,backendName:"cpu",kernelFunc:ta(ae,(e=>Math.acosh(e)))};const rr={kernelName:se,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,a=t;An(t,"addN");const s=a.map((e=>n.data.get(e.dataId).values)),r=i(a[0].shape,a[0].dtype),o=r.values;for(let e=0;en&&(n=s,a=e)}m[e]=a}return p.forEach((e=>a.disposeIntermediateTensorInfo(e))),a.makeTensorInfo(u,"int32",m)}};const dr={kernelName:le,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{axis:i}=s;An(r,"argMin");let l=e.parseAxisParam(i,r.shape);const d=o.getAxesPermutation(l,r.shape.length);let c=r;const p=[];null!=d&&(c=qa({inputs:{x:r},backend:a,attrs:{perm:d}}),p.push(c),l=o.getInnerMostAxes(l.length,c.shape.length)),l=[l[0]],o.assertAxesAreInnerMostDims("argMin",l,c.shape.length);const[u,h]=o.computeOutAndReduceShapes(c.shape,l),f=e.sizeFromShape(u),m=e.makeZerosTypedArray(f,"int32"),g=e.sizeFromShape(h),k=a.data.get(c.dataId).values;for(let e=0;ea.disposeIntermediateTensorInfo(e))),a.makeTensorInfo(u,"int32",m)}},cr={kernelName:de,backendName:"cpu",kernelFunc:ta(de,(e=>Math.asin(e)))},pr={kernelName:ce,backendName:"cpu",kernelFunc:ta(ce,(e=>Math.asinh(e)))},ur={kernelName:pe,backendName:"cpu",kernelFunc:ta(pe,(e=>Math.atan(e)))},hr={kernelName:ue,backendName:"cpu",kernelFunc:qn(ue,Rn(((e,t)=>Math.atan2(e,t))))},fr={kernelName:he,backendName:"cpu",kernelFunc:ta(he,(e=>Math.atanh(e)))};function mr(e,t,n,a,s,r){const o=s.strideHeight,l=s.strideWidth,d=s.dilationHeight,c=s.dilationWidth,p=s.effectiveFilterHeight,u=s.effectiveFilterWidth,h=s.padInfo.top,f=s.padInfo.left,m="max"===r?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,g=i(s.outShape,n),k=g.values,I=s.outShape[1]*s.outShape[2]*s.outShape[3],b=s.outShape[2]*s.outShape[3],y=s.outShape[3];for(let t=0;tg?g=o:"avg"===r&&(I+=o,b++)}if(isNaN(g))break}k[x+n*y+t]="avg"===r?I/b:g}}}return g}function gr(e,t,n,a,s=!1,r=!1){const 
o=i(a.outShape,"int32"),l=a.strideHeight,d=a.strideWidth,c=a.dilationHeight,p=a.dilationWidth,u=a.effectiveFilterHeight,h=a.effectiveFilterWidth,f=a.padInfo.top,m=a.padInfo.left,g=i(t,n,e);for(let e=0;ey&&(y=d,N=s?r?((e*a.inHeight+n)*a.inWidth+i)*a.inChannels+t:(n*a.inWidth+i)*a.inChannels+t:o*h+l)}}o.set(N,e,n,l,t)}}return o}function kr(e,t,n,a,s,r){const o=s.strideDepth,l=s.strideHeight,d=s.strideWidth,c=s.dilationDepth,p=s.dilationHeight,u=s.dilationWidth,h=s.effectiveFilterDepth,f=s.effectiveFilterHeight,m=s.effectiveFilterWidth,g=s.padInfo.front,k=s.padInfo.top,I=s.padInfo.left,b="max"===r?Number.NEGATIVE_INFINITY:Number.POSITIVE_INFINITY,y=i(s.outShape,n),N=y.values,T=s.outShape[1]*s.outShape[2]*s.outShape[3]*s.outShape[4],x=s.outShape[2]*s.outShape[3]*s.outShape[4],S=s.outShape[3]*s.outShape[4],v=s.outShape[4];for(let t=0;tT?T=s:"avg"===r&&(x+=s,S++),isNaN(T))break}if(isNaN(T))break}if(isNaN(T))break}N[k+t]="avg"===r?x/Math.max(S,1):T}}}}return y}const Ir={kernelName:fe,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n;An(r,"avgPool");const{filterSize:i,strides:l,pad:d,dimRoundingMode:c}=s;e.assert(o.eitherStridesOrDilationsAreOne(l,1),(()=>`Error in avgPool: Either strides or dilations must be 1. Got strides ${l} and dilations '1'`));const p=o.computePool2DInfo(r.shape,i,l,1,d,c);let u;if(1===p.filterWidth&&1===p.filterHeight&&e.arraysEqual(p.inShape,p.outShape))u=$n({inputs:{x:r},backend:a});else{const t=a.data.get(r.dataId).values,n=e.computeStrides(r.shape),s=mr(t,r.shape,r.dtype,n,p,"avg");u=a.makeTensorInfo(p.outShape,r.dtype,s.values)}return u}};const br={kernelName:me,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{filterSize:i,strides:l,pad:d,dimRoundingMode:c,dataFormat:p}=s;An(r,"avgPool3d");const u=o.computePool3DInfo(r.shape,i,l,1,d,c,p),h=kr(a.data.get(r.dataId).values,r.shape,r.dtype,e.computeStrides(r.shape),u,"avg");return a.makeTensorInfo(h.shape,"float32",h.values)}};const yr={kernelName:ge,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{dy:s,input:r}=t,{filterSize:l,strides:d,pad:c,dimRoundingMode:p}=a;An([s,r],"avgPool3DGrad");const u=o.computePool3DInfo(r.shape,l,d,1,c,p),h=u.strideDepth,f=u.strideHeight,m=u.strideWidth,g=u.filterDepth,k=u.filterHeight,I=u.filterWidth,b=u.dilationDepth,y=u.dilationHeight,N=u.dilationWidth,T=u.effectiveFilterDepth,x=u.effectiveFilterHeight,S=u.effectiveFilterWidth,v=T-1-u.padInfo.front,F=S-1-u.padInfo.left,w=x-1-u.padInfo.top,M=i(r.shape,"float32"),A=1/(g*k*I),D=n.bufferSync(s);for(let e=0;e=u.outDepth||Math.floor(a)!==a))for(let n=0;n=u.outHeight||Math.floor(s)!==s))for(let n=0;n=u.outWidth||Math.floor(r)!==r)continue;l+=D.get(e,a,s,r,t)}}}M.set(l*A,e,n,a,s,t)}return n.makeTensorInfo(M.shape,M.dtype,M.values)}};const Nr={kernelName:ke,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{dy:s,input:r}=t,l=r;An([s,r],"avgPoolGrad");const{filterSize:d,strides:c,pad:p}=a,u=o.computePool2DInfo(l.shape,d,c,1,p),h=u.strideHeight,f=u.strideWidth,m=u.filterHeight,g=u.filterWidth,k=u.dilationHeight,I=u.dilationWidth,b=u.effectiveFilterHeight,y=u.effectiveFilterWidth,N=y-1-u.padInfo.left,T=b-1-u.padInfo.top,x=i(l.shape,"float32"),S=1/(m*g),v=n.data.get(s.dataId).values,F=i(s.shape,"float32",v);for(let e=0;e=u.outHeight||Math.floor(a)!==a))for(let n=0;n=u.outWidth||Math.floor(s)!==s)continue;o+=F.get(e,a,s,t)}}x.set(o*S,e,n,a,t)}return n.makeTensorInfo(x.shape,x.dtype,x.values)}};const 
Tr={kernelName:Ie,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r,scale:o,offset:i,mean:l,variance:d}=n;e.assert(l.shape.length===d.shape.length,(()=>"Batch normalization gradient requires mean and variance to have equal ranks.")),e.assert(null==i||l.shape.length===i.shape.length,(()=>"Batch normalization gradient requires mean and offset to have equal ranks.")),e.assert(null==o||l.shape.length===o.shape.length,(()=>"Batch normalization gradient requires mean and scale to have equal ranks.")),An([r,l,d,o,i],"batchNorm");let{varianceEpsilon:c}=s;null==c&&(c=.001);const p=a.data.get(r.dataId).values,u=a.data.get(l.dataId).values,h=a.data.get(d.dataId).values,f=o?a.data.get(o.dataId).values:new Float32Array([1]),m=i?a.data.get(i.dataId).values:new Float32Array([0]),g=new Float32Array(p.length),k=m.length,I=f.length,b=h.length,y=u.length;let N=0,T=0,x=0,S=0;for(let e=0;e=k&&(N=0),T>=y&&(T=0),x>=I&&(x=0),S>=b&&(S=0);return a.makeTensorInfo(r.shape,r.dtype,g)}};const xr={kernelName:be,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{x:s}=t,{blockShape:r,crops:i}=a;An([s],"batchToSpaceND");const l=r.reduce(((e,t)=>e*t)),d=o.getReshaped(s.shape,r,l),c=o.getPermuted(d.length,r.length),p=o.getReshapedPermuted(s.shape,r,l),u=o.getSliceBeginCoords(i,r.length),h=o.getSliceSize(p,i,r.length),f=Qs({inputs:{x:s},backend:n,attrs:{shape:d}}),m=qa({inputs:{x:f},backend:n,attrs:{perm:c}}),g=Qs({inputs:{x:m},backend:n,attrs:{shape:p}}),k=hs({inputs:{x:g},backend:n,attrs:{begin:u,size:h}});return n.disposeIntermediateTensorInfo(f),n.disposeIntermediateTensorInfo(m),n.disposeIntermediateTensorInfo(g),k}};const Sr={kernelName:ye,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{x:s,weights:r}=t,{size:o}=a,i=Yn(n.data.get(s.dataId).values,n.data.get(r.dataId).values,r.dtype,r.shape,o);return n.makeTensorInfo([o],r.dtype,i)}};const vr={kernelName:Ne,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{s0:a,s1:s}=t,r=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=o.assertAndGetBroadcastShape(Array.from(r),Array.from(i));return n.makeTensorInfo([l.length],"int32",Int32Array.from(l))}},Fr={kernelName:Te,backendName:"cpu",kernelFunc:ta(Te,((e,t)=>{const n=t;return e>n.clipValueMax?n.clipValueMax:e{const{x:n}=t.inputs,a=t.backend,s=new Float32Array(e.sizeFromShape(n.shape)),r=a.data.get(n.dataId),o=r.complexTensorInfos.real,i=r.complexTensorInfos.imag,l=a.data.get(o.dataId).values,d=a.data.get(i.dataId).values;for(let e=0;ee.shape));o.assertParamsConsistent(l,i);let d=o.computeOutShape(n.map((e=>e.shape)),i);if(0===e.sizeFromShape(d))return a.makeTensorInfo(d,n[0].dtype,[]);const c=n.filter((t=>e.sizeFromShape(t.shape)>0));if(1===c.length)return $n({inputs:{x:c[0]},backend:a});if("complex64"===c[0].dtype){const e=c.map((e=>Vn({inputs:{input:e},backend:a}))),t=c.map((e=>Mr({inputs:{input:e},backend:a}))),n=Dr({inputs:e,backend:a,attrs:{axis:i}}),s=Dr({inputs:t,backend:a,attrs:{axis:i}}),r=Pn({inputs:{real:n,imag:s},backend:a});return e.forEach((e=>a.disposeIntermediateTensorInfo(e))),t.forEach((e=>a.disposeIntermediateTensorInfo(e))),a.disposeIntermediateTensorInfo(n),a.disposeIntermediateTensorInfo(s),r}const p=c.map((t=>{const n=e.sizeFromShape(t.shape.slice(i));return Qs({inputs:{x:t},backend:a,attrs:{shape:[-1,n]}})})),u=p.map((e=>({vals:a.data.get(e.dataId).values,shape:e.shape})));d=o.computeOutShape(p.map((e=>e.shape)),1);const 
h=1===p[0].shape[0],f=ra(u,d,n[0].dtype,h),m=o.computeOutShape(c.map((e=>e.shape)),i),g=a.makeTensorInfo(m,n[0].dtype,f);return p.forEach((e=>a.disposeIntermediateTensorInfo(e))),g}const Er={kernelName:ve,backendName:"cpu",kernelFunc:Dr};function zr(t){const{inputs:n,backend:a,attrs:s}=t,{x:r,filter:i}=n,{strides:l,pad:d,dataFormat:c,dilations:p,dimRoundingMode:u}=s;An([r,i],"conv2d");const h=o.convertConv2DDataFormat(c),f=o.computeConv2DInfo(r.shape,i.shape,l,p,d,u,!1,h),m=f.filterHeight,g=f.filterWidth,k=f.dilationHeight,I=f.dilationWidth,b=f.padInfo.left,y=f.padInfo.top,N="channelsLast"===f.dataFormat,T=new $(f.outShape,r.dtype),x=e.computeStrides(r.shape),S=e.computeStrides(i.shape),v=x[0],F=N?x[1]:x[2],w=N?x[2]:1,M=N?1:x[1],A=T.strides[0],D=N?T.strides[1]:T.strides[2],E=N?T.strides[2]:1,z=N?1:T.strides[1],W=a.data.get(r.dataId).values,R=a.data.get(i.dataId).values,P=T.values;for(let e=0;e=f.inHeight)continue;const r=e*S[0],o=t+n*F;for(let e=0;e=f.inWidth)continue;const s=o+a*w;let i=r+e*S[1];for(let e=0;e=p.inDepth)continue;const r=e*w[0],o=t+n*F[1];for(let e=0;e=p.inHeight)continue;const s=r+e*w[1],i=o+a*F[2];for(let e=0;e=p.inWidth)continue;const r=s+e*w[2],o=i+t*p.inChannels;let l=r;for(let e=0;eMath.cos(e)))},Vr={kernelName:We,backendName:"cpu",kernelFunc:ta(We,(e=>Math.cosh(e)))};const _r={kernelName:Re,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{image:r,boxes:o,boxInd:l}=n,{cropSize:d,method:c,extrapolationValue:p}=s,[u,h,f,m]=r.shape,g=o.shape[0],[k,I]=d,b=i([g,k,I,m],"float32"),y=a.data.get(o.dataId).values,N=a.data.get(l.dataId).values,T=a.data.get(r.dataId).values,x=e.computeStrides(r.shape),S=e.computeStrides(b.shape);for(let e=0;e=u)continue;const i=k>1?(s-n)*(h-1)/(k-1):0,l=I>1?(r-a)*(f-1)/(I-1):0;for(let t=0;t1?n*(h-1)+t*i:.5*(n+s)*(h-1);if(d<0||d>h-1)for(let n=0;n1?a*(f-1)+d*l:.5*(a+r)*(f-1);if(c<0||c>f-1){for(let n=0;n1?a*(f-1)+n*l:.5*(a+r)*(f-1);if(s<0||s>f-1){for(let a=0;ae+g-t-1:(e,t)=>e+t;for(let e=0;ee+g-t-1:(e,t)=>e+t;for(let e=0;e`Only NHWC dataFormat supported on CPU for depthToSpace. Got ${i}`));const l=r.shape[0],d=r.shape[1],c=r.shape[2],p=r.shape[3],u=d*o,h=c*o,f=p/(o*o),m=a.data.get(r.dataId).values,g=new Float32Array(l*u*h*f);let k=0;for(let e=0;e`Error in depthwiseConv2d: Either strides or dilations must be 1. 
Got strides ${l} and dilations '${f}'`));const m=o.computeConv2DInfo(r.shape,i.shape,l,f,d,p,!0),{filterHeight:g,filterWidth:k,dilationHeight:I,dilationWidth:b,padInfo:y}=m,N=y.left,T=y.top,x=m.outChannels/m.inChannels,S=new $(m.outShape,r.dtype),v=a.data.get(r.dataId).values,F=a.data.get(i.dataId).values,w=S.values;for(let e=0;e=m.inHeight)continue;const r=e*h[0],o=t+n*u[1];for(let e=0;e=m.inWidth)continue;const s=r+e*h[1],i=o+a*m.inChannels;let l=t,d=s;for(let e=0;e{const{x:s,filter:r}=t,{strides:i,pad:l,dilations:d}=a,c=n,p=c.data.get(s.dataId).values,u=s.shape.length,h=c.data.get(r.dataId).values,f=r.shape.length,{batchSize:m,inHeight:g,inWidth:k,inChannels:I,outHeight:b,outWidth:y,padInfo:N,strideHeight:T,strideWidth:x,filterHeight:S,filterWidth:v,dilationHeight:F,dilationWidth:w,outShape:M}=o.computeDilation2DInfo(s.shape,r.shape,i,l,"NHWC",d),A=e.sizeFromShape(M),D=M.length,E=e.getArrayFromDType(s.dtype,A);for(let t=0;t=0&&o=0&&cd&&(d=g)}}}E[e.locToIndex([t,n,o,l],D,e.computeStrides(M))]=d}}}return{dataId:c.write(e.toTypedArray(E,s.dtype),M,s.dtype),shape:M,dtype:s.dtype}}},Qr={kernelName:Le,backendName:"cpu",kernelFunc:({inputs:t,backend:n,attrs:a})=>{const{x:s,filter:r,dy:i}=t,{strides:l,pad:d,dilations:c}=a,p=n,u=e.toNestedArray(s.shape,p.data.get(s.dataId).values),h=e.toNestedArray(r.shape,p.data.get(r.dataId).values),{batchSize:f,inHeight:m,inWidth:g,inChannels:k,outHeight:I,outWidth:b,padInfo:y,strideHeight:N,strideWidth:T,filterHeight:x,filterWidth:S,dilationHeight:v,dilationWidth:F,outShape:w}=o.computeDilation2DInfo(s.shape,r.shape,l,d,"NHWC",c);e.assert(i.rank===w.length,(()=>`Error in ${Le}, dy must have the same rank as output ${w.length}, but got ${i.rank}`));const M=e.toNestedArray(w,p.data.get(i.dataId).values),A=e.makeZerosNestedTypedArray(r.shape,r.dtype);for(let e=0;e=0&&a=0&&do&&(o=s,i=t,l=n)}}}A[i][l][r]+=M[e][t][a][r]}}}return{dataId:p.write(e.toTypedArray(A,s.dtype),r.shape,r.dtype),shape:r.shape,dtype:r.dtype}}},Xr={kernelName:qe,backendName:"cpu",kernelFunc:({inputs:t,backend:n,attrs:a})=>{const{x:s,filter:r,dy:i}=t,{strides:l,pad:d,dilations:c}=a,p=n,u=e.toNestedArray(s.shape,p.data.get(s.dataId).values),h=e.toNestedArray(r.shape,p.data.get(r.dataId).values),{batchSize:f,inHeight:m,inWidth:g,inChannels:k,outHeight:I,outWidth:b,padInfo:y,strideHeight:N,strideWidth:T,filterHeight:x,filterWidth:S,dilationHeight:v,dilationWidth:F,outShape:w}=o.computeDilation2DInfo(s.shape,r.shape,l,d,"NHWC",c);e.assert(i.rank===w.length,(()=>`Error in ${qe}, dy must have the same rank as output ${w.length}, but got ${i.rank}`));const M=e.toNestedArray(w,p.data.get(i.dataId).values),A=e.makeZerosNestedTypedArray(s.shape,s.dtype);for(let e=0;e=0&&a=0&&do&&(o=s,i=a,l=d)}}}A[e][i][l][r]+=M[e][t][a][r]}}}return{dataId:p.write(e.toTypedArray(A,s.dtype),s.shape,s.dtype),shape:s.shape,dtype:s.dtype}}};const eo={kernelName:Ue,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{image:s}=t,{canvas:r,options:o}=a,{contextOptions:i,imageOptions:l}=o||{},d=(null==l?void 0:l.alpha)||1,c=(null==i?void 0:i.contextType)||"2d";if("2d"!==c)throw new Error(`Context type ${i.contextType} is not supported by the CPU backend.`);const p=r.getContext(c,(null==i?void 0:i.contextAttributes)||{});if(null==p)throw new Error(`Could not get the context with ${c} type.`);const[u,h]=s.shape.slice(0,2),f=2===s.shape.length?1:s.shape[2],m=n.data.get(s.dataId).values,g="float32"===s.dtype?255:1,k=new Uint8ClampedArray(h*u*4);for(let e=0;e1)throw new Error(`Tensor values for a float32 
Tensor must be in the range [0 - 1] but encountered ${a}.`)}else if("int32"===s.dtype&&(a<0||a>255))throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${a}.`);1===f?(t[0]=a*g,t[1]=a*g,t[2]=a*g):t[n]=a*g}const n=4*e;k[n+0]=Math.round(t[0]),k[n+1]=Math.round(t[1]),k[n+2]=Math.round(t[2]),k[n+3]=Math.round(t[3])}r.width=h,r.height=u;const I=new ImageData(k,h,u);return p.putImageData(I,0,0),s}};function to(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{axis:i,keepDims:l}=s;let d;An(r,"sum"),d="bool"===r.dtype?Bn({inputs:{x:r},backend:a,attrs:{dtype:"int32"}}):$n({inputs:{x:r},backend:a});const c=d.shape.length,p=e.parseAxisParam(i,d.shape),u=o.getAxesPermutation(p,c);let h=p,f=d;null!=u&&(f=qa({inputs:{x:d},backend:a,attrs:{perm:u}}),h=o.getInnerMostAxes(h.length,c)),o.assertAxesAreInnerMostDims("sum",h,f.shape.length);const[m,g]=o.computeOutAndReduceShapes(f.shape,h);let k=Cn(a,m,o.upcastType(f.dtype,"int32"));const I=e.sizeFromShape(g),b=a.data.get(k.dataId).values,y=a.data.get(f.dataId).values;for(let e=0;e=0&&(f=to({inputs:{x:f},backend:a,attrs:{axis:p[t]-(l.length-m),keepDims:!1}}),g.push(f)),m--)}for(const e of g)e!==f&&a.disposeIntermediateTensorInfo(e);return f}};const so={kernelName:je,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a}=t,{dy:s,y:r}=n;An([s,r],"eluGrad");const o=new Float32Array(e.sizeFromShape(r.shape)),i=a.data.get(r.dataId).values,l=a.data.get(s.dataId).values;for(let e=0;e=0?l[e]:l[e]*(t+1)}return a.makeTensorInfo(r.shape,"float32",o)}},ro=o.ERF_P,oo=o.ERF_A1,io=o.ERF_A2,lo=o.ERF_A3,co=o.ERF_A4,po=o.ERF_A5,uo=ta(Ye,(e=>{const t=Math.sign(e),n=Math.abs(e),a=1/(1+ro*n);return t*(1-((((po*a+co)*a+lo)*a+io)*a+oo)*a*Math.exp(-n*n))})),ho={kernelName:Ye,backendName:"cpu",kernelFunc:uo};function fo(t){const{inputs:n,backend:a,attrs:s}=t,{input:r}=n,{dim:o}=s,i=r.shape.length,l=r.shape.slice();let d=o;return o<0&&(e.assert(-(i+1)<=o,(()=>`Axis must be in the interval [${-(i+1)}, ${i}]`)),d=i+o+1),l.splice(d,0,1),Qs({inputs:{x:r},backend:a,attrs:{shape:l}})}const mo={kernelName:Je,backendName:"cpu",kernelFunc:fo},go=qn(Qe,Rn(((e,t)=>e/t))),ko={kernelName:Qe,backendName:"cpu",kernelFunc:go};function Io(t,n,a){const s=t.shape,r=s[0],i=s[1],l=a.data.get(t.dataId),d=l.complexTensorInfos.real,c=l.complexTensorInfos.imag,p=[r,i],u=e.sizeFromShape(p),h=e.getTypedArrayFromDType("float32",u),f=e.getTypedArrayFromDType("float32",u);for(let e=0;e{const{image:s}=t,r=a,o=e.getTypedArrayFromDType(s.dtype,e.sizeFromShape(s.shape)),[i,l,d,c]=s.shape,p=r.data.get(s.dataId).values;for(let e=0;e=0&&r=0,(()=>`GatherV2: the index value ${n} is not in [0, ${u-1}]`))}let h=d;null==d&&(h=0);const f=e.sizeFromShape(i.shape),m=o.segment_util.collectGatherOpShapeInfo(r,i,c,h),g=Qs({inputs:{x:r},backend:a,attrs:{shape:[m.batchSize,m.outerSize,m.dimSize,m.sliceSize]}}),k=Qs({inputs:{x:i},backend:a,attrs:{shape:[m.batchSize,f/m.batchSize]}}),I=[m.batchSize,m.outerSize,f/m.batchSize,m.sliceSize],b=a.bufferSync(k),y=ba(a.bufferSync(g),b,I);return a.disposeIntermediateTensorInfo(g),a.disposeIntermediateTensorInfo(k),a.makeTensorInfo(m.outputShape,y.dtype,y.values)}};const Ao={kernelName:ot,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a}=t,{input:s}=n,r=e.sizeFromShape(s.shape),o=s.shape[s.shape.length-1],i=Qs({inputs:{x:s},backend:a,attrs:{shape:[r/o,o]}}),l=Io(i,!0,a),d=Qs({inputs:{x:l},backend:a,attrs:{shape:s.shape}});return 
a.disposeIntermediateTensorInfo(i),a.disposeIntermediateTensorInfo(l),d}},Do={kernelName:it,backendName:"cpu",kernelFunc:ta(it,(e=>Number.isFinite(e)?1:0),"bool")},Eo={kernelName:lt,backendName:"cpu",kernelFunc:ta(lt,(e=>Math.abs(e)===1/0?1:0),"bool")},zo={kernelName:dt,backendName:"cpu",kernelFunc:ta(dt,(e=>Number.isNaN(e)?1:0),"bool")};const Wo={kernelName:ct,backendName:"cpu",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:a,stop:s,num:r}=n,o=Ma(a,s,r);return t.makeTensorInfo([o.length],"float32",o)}},Ro={kernelName:pt,backendName:"cpu",kernelFunc:ta(pt,(e=>Math.log1p(e)))},Po={kernelName:ut,backendName:"cpu",kernelFunc:qn(ut,Rn(((e,t)=>e&&t)),null,"bool")},Ho={kernelName:ht,backendName:"cpu",kernelFunc:ta(ht,(e=>e?0:1),"bool")},Co={kernelName:ft,backendName:"cpu",kernelFunc:qn(ft,Rn(((e,t)=>e||t)),null,"bool")};const $o={kernelName:mt,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{depthRadius:o,bias:i,alpha:l,beta:d}=s;An(r,"LRN");const c=r.shape[3],p=c-1,u=a.data.get(r.dataId).values,h=e.sizeFromShape(r.shape),f=new Float32Array(h);function m(e){const t=e%c;let n=e-t+Math.max(0,t-o);const a=e-t+Math.min(t+o,p);let s=0;for(;n<=a;n++){const e=u[n];s+=e*e}return s}for(let e=0;e`Error in maxPool: Either strides or dilations must be 1. Got strides ${l} and dilations '1'`));const p=o.computePool2DInfo(r.shape,i,l,1,d,c);let u;if(1===p.filterWidth&&1===p.filterHeight&&e.arraysEqual(p.inShape,p.outShape))u=$n({inputs:{x:r},backend:a});else{const t=a.data.get(r.dataId).values,n=e.computeStrides(r.shape),s=mr(t,r.shape,r.dtype,n,p,"max");u=a.makeTensorInfo(p.outShape,r.dtype,s.values)}return u}};const Bo={kernelName:bt,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{filterSize:i,strides:l,pad:d,dimRoundingMode:c,dataFormat:p}=s;An(r,"maxPool3d");const u=o.computePool3DInfo(r.shape,i,l,1,d,c,p),h=kr(a.data.get(r.dataId).values,r.shape,r.dtype,e.computeStrides(r.shape),u,"max");return a.makeTensorInfo(h.shape,"float32",h.values)}};const Lo={kernelName:yt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{dy:s,input:r}=t,{filterSize:l,strides:d,pad:c,dimRoundingMode:p}=a;An([s,r],"maxPool3DGrad");const u=o.computePool3DInfo(r.shape,l,d,1,c,p),h=function(e,t){const n=i(t.outShape,"int32"),a=t.strideDepth,s=t.strideHeight,r=t.strideWidth,o=t.dilationDepth,l=t.dilationHeight,d=t.dilationWidth,c=t.effectiveFilterDepth,p=t.effectiveFilterHeight,u=t.effectiveFilterWidth,h=t.padInfo.front,f=t.padInfo.top,m=t.padInfo.left;for(let i=0;i=S&&(S=l,v=n*p*u+s*p+o)}}}n.set(v,i,k,a,s,g)}}}return n}(n.bufferSync(r),u),f=u.strideDepth,m=u.strideHeight,g=u.strideWidth,k=u.dilationDepth,I=u.dilationHeight,b=u.dilationWidth,y=u.effectiveFilterDepth,N=u.effectiveFilterHeight,T=u.effectiveFilterWidth,x=y-1-u.padInfo.front,S=T-1-u.padInfo.left,v=N-1-u.padInfo.top,F=i(r.shape,"float32"),w=n.bufferSync(s);for(let e=0;e=u.outDepth||Math.floor(a)!==a))for(let s=0;s=u.outHeight||Math.floor(r)!==r))for(let o=0;o=u.outWidth||Math.floor(d)!==d)continue;const c=y*N*T-1-h.get(e,a,r,d,t)===n*N*T+s*T+o?1:0;if(0===c)continue;l+=w.get(e,a,r,d,t)*c}}}F.set(l,e,n,a,s,t)}return n.makeTensorInfo(F.shape,F.dtype,F.values)}};const 
qo={kernelName:Nt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{dy:s,input:r,output:l}=t,d=r;An([r,l],"maxPoolGrad");const{filterSize:c,strides:p,pad:u,dimRoundingMode:h}=a,f=o.computePool2DInfo(d.shape,c,p,1,u,h),m=n.data.get(d.dataId).values,g=i(f.outShape,d.dtype,gr(m,d.shape,d.dtype,f).values),k=f.strideHeight,I=f.strideWidth,b=f.dilationHeight,y=f.dilationWidth,N=f.effectiveFilterHeight,T=f.effectiveFilterWidth,x=T-1-f.padInfo.left,S=N-1-f.padInfo.top,v=i(d.shape,"float32"),F=n.data.get(s.dataId).values,w=i(s.shape,"float32",F);for(let e=0;e=f.outHeight||Math.floor(a)!==a))for(let s=0;s=f.outWidth||Math.floor(i)!==i)continue;const l=N*T-1-g.get(e,a,i,t)===n*T+s?1:0;if(0===l)continue;o+=w.get(e,a,i,t)*l}}v.set(o,e,n,a,t)}return n.makeTensorInfo(v.shape,v.dtype,v.values)}};const Uo={kernelName:Tt,backendName:"cpu",kernelFunc:({inputs:t,attrs:n,backend:a})=>{const{x:s}=t,{filterSize:r,strides:i,pad:l,includeBatchInIndex:d}=n,c=a;An(s,"MaxPoolWithArgmax");const p=c.data.get(s.dataId).values,u=o.computePool2DInfo(s.shape,r,i,[1,1],l),[h,f]=function(t,n,a,s,r){const o=mr(t,0,a,e.computeStrides(n),r,"max"),i=gr(t,n,a,r,!0,s);return[o.values,i.values]}(p,s.shape,s.dtype,d,u),m=c.write(h,u.outShape,s.dtype),g=c.write(f,u.outShape,s.dtype);return[{dataId:m,shape:u.outShape,dtype:s.dtype},{dataId:g,shape:u.outShape,dtype:"int32"}]}};const Zo={kernelName:xt,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{axis:i,keepDims:l}=s,d=e.parseAxisParam(i,r.shape),c=o.computeOutAndReduceShapes(r.shape,d)[1],p=e.sizeFromShape(c),u=[],h=a.makeTensorInfo([],"float32",new Float32Array([p]));u.push(h);const f=Bn({inputs:{x:r},backend:a,attrs:{dtype:"float32"}});u.push(f);const m=go({inputs:{a:f,b:h},backend:a});u.push(m);const g=to({inputs:{x:m},backend:a,attrs:{axis:i,keepDims:l}});return u.forEach((e=>a.disposeIntermediateTensorInfo(e))),g}};const Ko={kernelName:St,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{axis:i,keepDims:l}=s;An(r,"min");const d=e.parseAxisParam(i,r.shape);let c=d;const p=o.getAxesPermutation(c,r.shape.length);let u=r;null!=p&&(u=qa({inputs:{x:r},backend:a,attrs:{perm:p}}),c=o.getInnerMostAxes(c.length,r.shape.length)),o.assertAxesAreInnerMostDims("min",c,u.shape.length);const[h,f]=o.computeOutAndReduceShapes(u.shape,c),m=e.sizeFromShape(f),g=e.makeZerosTypedArray(e.sizeFromShape(h),u.dtype),k=a.data.get(u.dataId).values;for(let e=0;ee[0]+r.shape[t]+e[1])),d=o.map((e=>e[0])),c=o.map(((e,t)=>e[0]+r.shape[t])),p="reflect"===i?0:1,u=a.data.get(r.dataId).values,h=r.shape.length,f=e.computeStrides(r.shape),m=e.sizeFromShape(l),g=l.length,k=e.computeStrides(l),I=e.getTypedArrayFromDType(r.dtype,m);for(let t=0;t=c[e]&&(n[e]=2*(c[e]-1)-n[e]+p);n=n.map(((e,t)=>e-d[t]));const a=e.locToIndex(n,h,f);I[t]=u[a]}return{dataId:a.write(I,l,r.dtype),shape:l,dtype:r.dtype}}},Yo={kernelName:Ft,backendName:"cpu",kernelFunc:qn(Ft,Rn(((e,t)=>{const n=e%t;return e<0&&t<0||e>=0&&t>=0?n:(n+t)%t})))};function Jo(t){const{inputs:n,backend:a,attrs:s}=t,{logits:r}=n,{dim:i}=s,l=r.shape.length;let d=i;if(-1===d&&(d=l-1),d!==l-1)throw Error(`Softmax along a non-last dimension is not yet supported. 
Logits was rank ${l} and dim was ${d}`);const c=e.parseAxisParam([d],r.shape),p=Vo({inputs:{x:r},backend:a,attrs:{reductionIndices:c,keepDims:!1}}),u=o.expandShapeToKeepDim(p.shape,c),h=Qs({inputs:{x:p},backend:a,attrs:{shape:u}}),f=Es({inputs:{a:r,b:h},backend:a}),m=ca({inputs:{x:f},backend:a}),g=to({inputs:{x:m},backend:a,attrs:{axis:c,keepDims:!1}}),k=Qs({inputs:{x:g},backend:a,attrs:{shape:u}}),I=go({inputs:{a:m,b:k},backend:a});return a.disposeIntermediateTensorInfo(p),a.disposeIntermediateTensorInfo(h),a.disposeIntermediateTensorInfo(f),a.disposeIntermediateTensorInfo(m),a.disposeIntermediateTensorInfo(g),a.disposeIntermediateTensorInfo(k),I}const Qo={kernelName:wt,backendName:"cpu",kernelFunc:Jo};const Xo={kernelName:Mt,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{logits:r}=n,{numSamples:o,seed:i,normalized:l}=s;An(r,"multinomial");const d=l?r:Jo({inputs:{logits:r},backend:a,attrs:{dim:-1}}),c=d.shape[0],p=d.shape[1],u=a.data.get(d.dataId).values,h=[c,o],f=e.makeZerosTypedArray(e.sizeFromShape(h),"int32");for(let e=0;e=0&&u[e]{e.assertShapesMatch(o,t.shape,"All tensors passed to stack must have matching shapes"),e.assert(i===t.dtype,(()=>"All tensors passed to stack must have matching dtypes"))}));const l=[],d=Dr({inputs:n.map((e=>{const t=fo({inputs:{input:e},backend:a,attrs:{dim:r}});return l.push(t),t})),backend:a,attrs:{axis:r}});return l.forEach((e=>a.disposeIntermediateTensorInfo(e))),d}const pi={kernelName:Pt,backendName:"cpu",kernelFunc:ci};const ui={kernelName:Ht,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{paddings:o,constantValue:i}=s;An(r,"pad");const l=o.map(((e,t)=>e[0]+r.shape[t]+e[1])),d=o.map((e=>e[0])),c=a.data.get(r.dataId).values,p=e.sizeFromShape(r.shape),u=r.shape.length,h=e.computeStrides(r.shape),f=e.sizeFromShape(l),m=l.length,g=e.computeStrides(l),k=e.getTypedArrayFromDType(r.dtype,f);0!==i&&k.fill(i);for(let t=0;te+d[t]));k[e.locToIndex(n,m,g)]=c[t]}return{dataId:a.write(k,l,r.dtype),shape:l,dtype:r.dtype}}},hi={kernelName:Ct,backendName:"cpu",kernelFunc:qn(Ct,Rn(((e,t)=>Math.pow(e,t))))};const fi={kernelName:$t,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{paramsNestedSplits:s,paramsDenseValues:r,indices:o}=t,i=s.map((e=>n.data.get(e.dataId).values)),l=s.map((e=>e.shape)),d=n.data.get(r.dataId).values,c=n.data.get(o.dataId).values,[p,u,h]=Qa(i,l,d,r.shape,r.dtype,c,o.shape),f=p.map((e=>n.makeTensorInfo([e.length],"int32",e))),m=n.makeTensorInfo(h,r.dtype,u);return f.concat([m])}};const mi={kernelName:Ot,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{starts:a,limits:s,deltas:r}=t,o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=n.data.get(r.dataId).values,[d,c]=Xa(o,a.shape,a.dtype,i,s.shape,l,r.shape);return[n.makeTensorInfo([d.length],"int32",d),n.makeTensorInfo([c.length],a.dtype,c)]}};const gi={kernelName:Vt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{shape:s,values:r,defaultValue:o,rowPartitionTensors:i}=t,{rowPartitionTypes:l}=a,d=n.data.get(s.dataId).values,c=n.data.get(r.dataId).values,p=n.data.get(o.dataId).values,u=i.map((e=>n.data.get(e.dataId).values)),h=i.map((e=>e.shape)),[f,m]=ss(d,s.shape,c,r.shape,r.dtype,p,o.shape,u,h,l);return n.makeTensorInfo(f,r.dtype,m)}};const ki={kernelName:_t,backendName:"cpu",kernelFunc:function(e){const{backend:t,attrs:n}=e,{start:a,stop:s,dtype:r,step:o}=n,i=rs(a,s,o,r);return 
t.makeTensorInfo([i.length],r,i)}},Ii={kernelName:Gt,backendName:"cpu",kernelFunc:ta(Gt,(e=>1/e))};const bi={kernelName:Bt,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{images:r}=n,{alignCorners:o,halfPixelCenters:i,size:l}=s;An(r,"resizeBilinear");const d=e.computeStrides(r.shape),[c,p]=l,[u,h,f,m]=r.shape,g=a.data.get(r.dataId).values,k=new Float32Array(e.sizeFromShape([u,c,p,m])),I=[o&&c>1?h-1:h,o&&p>1?f-1:f],b=[o&&c>1?c-1:c,o&&p>1?p-1:p];let y=0;const N=I[0]/b[0],T=I[1]/b[1];for(let e=0;e1?c-1:c,i&&f>1?p-1:p],k=[i&&h>1?h-1:h,i&&f>1?f-1:f],I=g[0]/k[0],b=g[1]/k[1],y=a.data.get(o.dataId).values;let N=0;for(let e=0;e1?h-1:h,o&&p>1?f-1:f],b=[o&&c>1?c-1:c,o&&p>1?p-1:p],y=I[0]/b[0],N=I[1]/b[1];let T=0;for(let e=0;e1?p-1:p,i&&m>1?u-1:u],b=[i&&f>1?f-1:f,i&&m>1?m-1:m],y=I[0]/b[0],N=I[1]/b[1],T=1/y,x=1/N,S=2*Math.ceil(T)+2,v=2*Math.ceil(x)+2;for(let e=0;e=f)continue;const h=t+l*d[1],g=l*y;if(e===Math.min(p-1,i?Math.round(g):Math.floor(g)))for(let e=0;e=m)continue;const s=h+t*d[2],r=t*N;a===Math.min(u-1,i?Math.round(r):Math.floor(r))&&(o+=k[s+n])}}g[r+n]=o}}}}return a.makeTensorInfo(r.shape,r.dtype,g)}};const xi={kernelName:Zt,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{dims:o}=s;An(r,"reverse");const i=r.shape.length,l=e.parseAxisParam(o,r.shape);if(0===i)return $n({inputs:{x:r},backend:a});const d=new $(r.shape,r.dtype),c=a.bufferSync(r);for(let e=0;en[e]=r.shape[e]-1-n[e])),d.set(c.get(...n),...t)}return a.makeTensorInfo(d.shape,d.dtype,d.values)}},Si={kernelName:Kt,backendName:"cpu",kernelFunc:({inputs:t,attrs:n,backend:a})=>{const{image:s}=t,{radians:r,fillValue:i,center:l}=n,d=a,c=e.getTypedArrayFromDType(s.dtype,e.sizeFromShape(s.shape)),[p,u,h,f]=s.shape,[m,g]=o.getImageCenter(l,u,h),k=Math.sin(r),I=Math.cos(r),b=d.data.get(s.dataId).values;for(let e=0;e=0&&y=0&&N{const t=Math.floor(e);return e-t<.5?Math.floor(e):e-t>.5?Math.ceil(e):t%2==0?t:t+1}))};const Fi={kernelName:Yt,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{indices:s,updates:r}=t,{shape:i}=a,{sliceRank:l,numUpdates:d,sliceSize:c,strides:p,outputSize:u}=o.calculateShapes(r,s,i),h=ls(n.bufferSync(s),n.bufferSync(r),i,u,c,d,l,p,0,!0);return n.makeTensorInfo(i,h.dtype,h.values)}};function wi(e,t){let n=0,a=e.length,s=0;for(;n1||1===r.shape.length?1:e.sizeFromShape(r.shape.slice(1));for(let e=0;ee>=0?zi*e:Ei*(Math.exp(e)-1)))},Ri={kernelName:en,backendName:"cpu",kernelFunc:ta(en,(e=>e<0?-1:e>0?1:0))},Pi={kernelName:tn,backendName:"cpu",kernelFunc:ta(tn,(e=>Math.sin(e)))},Hi={kernelName:nn,backendName:"cpu",kernelFunc:ta(nn,(e=>Math.sinh(e)))},Ci=Math.log(1.1920928955078125e-7)+2,$i={kernelName:an,backendName:"cpu",kernelFunc:ta(an,(e=>{const t=e>-Ci,n=eNumber(e))))),n.makeTensorInfo([m.length],a.dtype,new Int32Array(m))]}};const _i={kernelName:on,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{inputIndices:a,inputShape:s,newShape:r}=t;if(2!==a.shape.length)throw new Error(`Input indices should be a matrix but received shape\n ${a.shape}`);if(1!==s.shape.length)throw new Error(`Input shape should be a vector but received shape\n ${s.shape}`);if(1!==r.shape.length)throw new Error(`Target shape should be a vector but received shape ${r.shape}`);const o=Array.from(n.data.get(s.dataId).values),i=n.data.get(a.dataId).values,l=Array.from(n.data.get(r.dataId).values),[d,c,p]=gs(i,a.shape,a.dtype,o,l);return[n.makeTensorInfo(c,a.dtype,d),n.makeTensorInfo([p.length],r.dtype,new Int32Array(p))]}};const 
Gi={kernelName:ln,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{data:a,indices:s,segmentIds:r}=t;if(a.shape.length<1)throw new Error("Data should be at least 1 dimensional but received scalar");if(1!==s.shape.length)throw new Error(`Indices should be a vector but received shape\n ${s.shape}`);if(1!==r.shape.length)throw new Error(`Segment ids should be a vector but received shape\n ${r.shape}`);if(s.shape[0]!==r.shape[0])throw new Error("segmentIds and indices should have same size.");const o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=n.data.get(r.dataId).values,[d,c]=ks(o,a.shape,a.dtype,i,l,!0);return n.makeTensorInfo(c,a.dtype,d)}};const Bi={kernelName:dn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n}=e,{data:a,indices:s,segmentIds:r}=t;if(a.shape.length<1)throw new Error("Data should be at least 1 dimensional but received scalar");if(1!==s.shape.length)throw new Error(`Indices should be a vector but received shape\n ${s.shape}`);if(1!==r.shape.length)throw new Error(`Segment ids should be a vector but received shape\n ${r.shape}`);if(s.shape[0]!==r.shape[0])throw new Error("segmentIds and indices should have same size.");const o=n.data.get(a.dataId).values,i=n.data.get(s.dataId).values,l=n.data.get(r.dataId).values,[d,c]=ks(o,a.shape,a.dtype,i,l);return n.makeTensorInfo(c,a.dtype,d)}};const Li={kernelName:cn,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{sparseIndices:r,sparseValues:i,defaultValue:l}=n,{outputShape:d}=s,{sliceRank:c,numUpdates:p,sliceSize:u,strides:h,outputSize:f}=o.calculateShapes(i,r,d),m=!1,g=a.bufferSync(r);let k;switch(i.dtype){case"bool":k=ls(g,a.bufferSync(i),d,f,u,p,c,h,Boolean(a.data.get(l.dataId).values[0]),m);break;case"float32":k=ls(g,a.bufferSync(i),d,f,u,p,c,h,a.data.get(l.dataId).values[0],m);break;case"int32":k=ls(g,a.bufferSync(i),d,f,u,p,c,h,a.data.get(l.dataId).values[0],m);break;case"string":k=ls(g,a.bufferSync(i),d,f,u,p,c,h,e.decodeString(a.data.get(l.dataId).values[0]),m);break;default:throw new Error(`Unsupported type ${i.dtype}`)}return a.makeTensorInfo(d,k.dtype,k.values)}};const qi={kernelName:pn,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{numOrSizeSplits:i,axis:l}=s,d=e.parseAxisParam(l,r.shape)[0],c=o.prepareSplitSize(r,i,d),p=new Array(r.shape.length).fill(0),u=r.shape.slice();return c.map((e=>{const t=[...u];t[d]=e;const n=hs({inputs:{x:r},backend:a,attrs:{begin:p,size:t}});return p[d]+=e,n}))}},Ui={kernelName:un,backendName:"cpu",kernelFunc:({inputs:e,backend:t})=>{const{x:n}=e,a=t;An(n,"square");const s=a.data.get(n.dataId).values,r=new Float32Array(s.length);for(let e=0;e{const n=t;return isNaN(e)?NaN:e>0?1:n.alpha}))};const Ki={kernelName:fn,backendName:"cpu",kernelFunc:function(t){const{inputs:n,backend:a,attrs:s}=t,{x:r}=n,{begin:o,end:i,strides:l,beginMask:d,endMask:c,ellipsisMask:p,newAxisMask:u,shrinkAxisMask:h}=s;An(r,"stridedSlice");const{finalShapeSparse:f,finalShape:m,isIdentity:g,sliceDim0:k,isSimpleSlice:I,begin:b,end:y,strides:N}=V.sliceInfo(r.shape,o,i,l,d,c,p,u,h);let T;if(g)T=Qs({inputs:{x:r},backend:a,attrs:{shape:m}});else if(k||I){e.assert(r.shape.length>=1,(()=>`Input must have rank at least 1, got: ${r.shape.length}`));const t=V.computeOutShape(b,y,N),n=hs({inputs:{x:r},backend:a,attrs:{begin:b,size:t}});T=Qs({inputs:{x:n},backend:a,attrs:{shape:m}}),a.disposeIntermediateTensorInfo(n)}else{const e=Ss(f,a.bufferSync(r),N,b);T=a.makeTensorInfo(m,e.dtype,e.values)}return T}};const 
ji={kernelName:mn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{separator:s,nGramWidths:r,leftPad:o,rightPad:i,padWidth:l,preserveShortSequences:d}=a,{data:c,dataSplits:p}=t,u=n.data.get(c.dataId).values,h=n.data.get(p.dataId).values,[f,m]=Fs(u,h,s,r,o,i,l,d);return[n.makeTensorInfo([f.length],"string",f),n.makeTensorInfo(p.shape,"int32",m)]}};const Yi={kernelName:gn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{skipEmpty:s}=a,{input:r,delimiter:o}=t;if("string"!==r.dtype)throw new Error("Input must be of datatype string");if(1!==r.shape.length)throw new Error(`Input must be a vector, got shape: ${r.shape}`);if(0!==o.shape.length)throw new Error(`Delimiter must be a scalar, got shape: ${o.shape}`);const i=n.data.get(r.dataId).values,l=n.data.get(o.dataId).values[0],[d,c,p]=Ms(i,l,s),u=c.length;return[n.makeTensorInfo([u,2],"int32",d),n.makeTensorInfo([u],"string",c),n.makeTensorInfo([2],"int32",new Int32Array(p))]}};const Ji={kernelName:kn,backendName:"cpu",kernelFunc:function(e){const{inputs:t,backend:n,attrs:a}=e,{numBuckets:s}=a,{input:r}=t;if("string"!==r.dtype)throw new Error("Input must be of datatype string");if(s<=0)throw new Error("Number of buckets must be at least 1");const o=As(n.data.get(r.dataId).values,s);return n.makeTensorInfo(r.shape,"int32",o)}},Qi={kernelName:In,backendName:"cpu",kernelFunc:ta(In,(e=>Math.tan(e)))},Xi=ta(bn,(e=>Math.tanh(e)));const el={kernelName:xn,backendName:"cpu",kernelFunc:function(t){const{inputs:n,attrs:a,backend:s}=t,{image:r,transforms:o}=n,{interpolation:i,fillMode:l,fillValue:d,outputShape:c}=a,[p,u,h,f]=r.shape,[m,g]=null!=c?c:[u,h],k=[p,m,g,f],I=e.computeStrides(r.shape),b=I[0],y=I[1],N=I[2],T=e.computeStrides(k),x=T[0],S=T[1],v=T[2],F=e.getTypedArrayFromDType(r.dtype,e.sizeFromShape(k));F.fill(d);const w=s.data.get(r.dataId).values,M=s.data.get(o.dataId).values;for(let e=0;en-1)if(n<=1)a=0;else{const e=2*n;a-=e*Math.trunc(a/e),a>=n&&(a=e-a-1)}return e.clamp(0,a,n-1)}(t,n);case"wrap":return function(t,n){let a=t;if(a<0)if(n<=1)a=0;else{const e=n-1;a+=n*(Math.trunc(-a/e)+1)}else if(a>n-1)if(n<=1)a=0;else{const e=n-1;a-=n*Math.trunc(a/e)}return e.clamp(0,a,n-1)}(t,n);case"nearest":return function(t,n){return e.clamp(0,t,n-1)}(t,n);default:return function(e,t){return e}(t)}}function nl(e,t,n,a,s,r,o,i,l,d,c){return 0<=i&&ia.disposeIntermediateTensorInfo(e))),u}},li];for(const e of rl)wn(e);export{En as MathBackendCPU,$s as shared,Os as version_cpu}; //# sourceMappingURL=tf-backend-cpu.fesm.min.js.map
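/*
 * Usage sketch (illustrative, not part of the generated bundle): per the imports of
 * registerBackend/registerKernel and the registration loop above, importing this module
 * has the side effect of registering the CPU kernel configs and exposing MathBackendCPU,
 * so application code normally consumes it with a bare side-effect import alongside
 * @tensorflow/tfjs-core. The package name and calls below describe the standard tfjs
 * workflow and are an assumption about how this build is packaged, not code from this file:
 *
 *   import * as tf from '@tensorflow/tfjs-core';
 *   import '@tensorflow/tfjs-backend-cpu';      // side-effect import: registers the 'cpu' backend and its kernels
 *
 *   async function main() {
 *     await tf.setBackend('cpu');               // select the backend provided by this bundle
 *     const x = tf.tensor2d([[1, 2], [3, 4]]);
 *     x.square().print();                       // dispatches to the Square kernel registered above
 *   }
 *   main();
 */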